//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/OSLog.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsBPF.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/IR/IntrinsicsS390.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/X86TargetParser.h"
#include <sstream>

using namespace clang;
using namespace CodeGen;
using namespace llvm;

static int64_t clamp(int64_t Value, int64_t Low, int64_t High) {
  return std::min(High, std::max(Low, Value));
}

static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
                             Align AlignmentInBytes) {
  ConstantInt *Byte;
  switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
  case LangOptions::TrivialAutoVarInitKind::Uninitialized:
    // Nothing to initialize.
    return;
  case LangOptions::TrivialAutoVarInitKind::Zero:
    Byte = CGF.Builder.getInt8(0x00);
    break;
  case LangOptions::TrivialAutoVarInitKind::Pattern: {
    llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
    Byte = llvm::dyn_cast<llvm::ConstantInt>(
        initializationPatternFor(CGF.CGM, Int8));
    break;
  }
  }
  if (CGF.CGM.stopAutoInit())
    return;
  auto *I = CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
  I->addAnnotationMetadata("auto-init");
}
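
// Illustrative sketch (not taken from this file): with
// -ftrivial-auto-var-init=pattern, a call such as
//   char *p = (char *)__builtin_alloca(n);
// is followed by a fill of the allocation with the target's pattern byte
// (typically 0xAA), roughly:
//   %p = alloca i8, i64 %n
//   call void @llvm.memset.p0i8.i64(i8* %p, i8 -86, i64 %n, i1 false)
// with the memset tagged with "auto-init" annotation metadata.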

/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
                                                     unsigned BuiltinID) {
  assert(Context.BuiltinInfo.isLibFunction(BuiltinID));

  // Get the name, skip over the __builtin_ prefix (if necessary).
  StringRef Name;
  GlobalDecl D(FD);

  // If the builtin has been declared explicitly with an assembler label,
  // use the mangled name. This differs from the plain label on platforms
  // that prefix labels.
  if (FD->hasAttr<AsmLabelAttr>())
    Name = getMangledName(D);
  else
    Name = Context.BuiltinInfo.getName(BuiltinID) + 10;

  llvm::FunctionType *Ty =
      cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));

  return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}
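
// Example: for Builtin::BI__builtin_fabsf the builtin name is
// "__builtin_fabsf"; skipping the 10-character "__builtin_" prefix yields
// the library name "fabsf", so the module ends up with a declaration along
// the lines of:
//   declare float @fabsf(float)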

/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                        QualType T, llvm::IntegerType *IntType) {
  V = CGF.EmitToMemory(V, T);

  if (V->getType()->isPointerTy())
    return CGF.Builder.CreatePtrToInt(V, IntType);

  assert(V->getType() == IntType);
  return V;
}

static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                          QualType T, llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}

/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
static Value *MakeBinaryAtomicValue(
    CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      T, E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
      llvm::IntegerType::get(CGF.getLLVMContext(),
                             CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);

  llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
      Kind, Args[0], Args[1], Ordering);
  return EmitFromInt(CGF, Result, T, ValueType);
}
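
// Sketch of the lowering above: a call such as
//   __sync_fetch_and_add(&counter, 5)   // counter is an int
// arrives here with Kind == AtomicRMWInst::Add and becomes roughly
//   %old = atomicrmw add i32* %counter, i32 5 seq_cst
// with %old (the pre-operation value) converted back to the source type as
// the result.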

static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Val = CGF.EmitScalarExpr(E->getArg(0));
  Value *Address = CGF.EmitScalarExpr(E->getArg(1));

  // Convert the type of the pointer to a pointer to the stored type.
  Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
  Value *BC = CGF.Builder.CreateBitCast(
      Address, llvm::PointerType::getUnqual(Val->getType()), "cast");
  LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType());
  LV.setNontemporal(true);
  CGF.EmitStoreOfScalar(Val, LV, false);
  return nullptr;
}

static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Address = CGF.EmitScalarExpr(E->getArg(0));

  LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType());
  LV.setNontemporal(true);
  return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
}

static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst::BinOp Kind,
                               const CallExpr *E) {
  return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
}

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   llvm::AtomicRMWInst::BinOp Kind,
                                   const CallExpr *E,
                                   Instruction::BinaryOps Op,
                                   bool Invert = false) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      T, E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
      llvm::IntegerType::get(CGF.getLLVMContext(),
                             CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);

  llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
      Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
  Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
  if (Invert)
    Result =
        CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
                                llvm::ConstantInt::getAllOnesValue(IntType));
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}
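
// Sketch: atomicrmw yields the *old* value, so for post-op builtins the
// operation is redone on the result; __sync_add_and_fetch(&x, v) becomes
// roughly
//   %old = atomicrmw add i32* %x, i32 %v seq_cst
//   %res = add i32 %old, %v
// and for __sync_nand_and_fetch, Invert is set so the redone 'and' is
// additionally xor'ed with -1 to form the nand result.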

/// Utility to insert an atomic cmpxchg instruction.
///
/// @param CGF The current codegen function.
/// @param E   Builtin call expression to convert to cmpxchg.
///            arg0 - address to operate on
///            arg1 - value to compare with
///            arg2 - new value
/// @param ReturnBool Specifies whether to return success flag of
///                   cmpxchg result or the old value.
///
/// @returns result of cmpxchg, according to ReturnBool
///
/// Note: In order to lower Microsoft's _InterlockedCompareExchange*
/// intrinsics, invoke the function EmitAtomicCmpXchgForMSIntrin.
static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
                                     bool ReturnBool) {
  QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  Value *Args[3];
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);

  Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering::SequentiallyConsistent);
  if (ReturnBool)
    // Extract boolean success flag and zext it to int.
    return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
                                  CGF.ConvertType(E->getType()));
  else
    // Extract old value and emit it using the same type as compare value.
    return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
                       ValueType);
}
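
// Sketch of the two forms: __sync_val_compare_and_swap(&x, old, new)
// (ReturnBool == false) lowers to roughly
//   %pair = cmpxchg i32* %x, i32 %old, i32 %new seq_cst seq_cst
//   %prev = extractvalue { i32, i1 } %pair, 0
// while __sync_bool_compare_and_swap extracts field 1 (the success bit)
// and zero-extends it to the result type instead.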

/// This function should be invoked to emit atomic cmpxchg for Microsoft's
/// _InterlockedCompareExchange* intrinsics which have the following signature:
/// T _InterlockedCompareExchange(T volatile *Destination,
///                               T Exchange,
///                               T Comparand);
///
/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
/// cmpxchg *Destination, Comparand, Exchange.
/// So we need to swap Comparand and Exchange when invoking
/// CreateAtomicCmpXchg. That is the reason we could not use the above utility
/// function MakeAtomicCmpXchgValue since it expects the arguments to be
/// already swapped.

static Value *EmitAtomicCmpXchgForMSIntrin(
    CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      E->getType(), E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(1)->getType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(2)->getType()));

  auto *Destination = CGF.EmitScalarExpr(E->getArg(0));
  auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
  auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));

  // For Release ordering, the failure ordering should be Monotonic.
  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
                         AtomicOrdering::Monotonic :
                         SuccessOrdering;

  // The atomic instruction is marked volatile for consistency with MSVC. This
  // blocks the few atomics optimizations that LLVM has. If we want to optimize
  // _Interlocked* operations in the future, we will have to remove the
  // volatile marker.
  auto *Result = CGF.Builder.CreateAtomicCmpXchg(
      Destination, Comparand, Exchange,
      SuccessOrdering, FailureOrdering);
  Result->setVolatile(true);
  return CGF.Builder.CreateExtractValue(Result, 0);
}
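
// Sketch of the swap described above: _InterlockedCompareExchange(&Dest,
// Exch, Comp) is emitted roughly as
//   %pair = cmpxchg volatile i32* %Dest, i32 %Comp, i32 %Exch seq_cst seq_cst
// so the third source argument becomes the cmpxchg compare operand, the
// second becomes the new value, and field 0 of the pair (the previous
// value) is the return value.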

// 64-bit Microsoft platforms support 128-bit cmpxchg operations. They are
// prototyped like this:
//
// unsigned char _InterlockedCompareExchange128...(
//     __int64 volatile * _Destination,
//     __int64 _ExchangeHigh,
//     __int64 _ExchangeLow,
//     __int64 * _ComparandResult);
static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
                                              const CallExpr *E,
                                              AtomicOrdering SuccessOrdering) {
  assert(E->getNumArgs() == 4);
  llvm::Value *Destination = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2));
  llvm::Value *ComparandPtr = CGF.EmitScalarExpr(E->getArg(3));

  assert(Destination->getType()->isPointerTy());
  assert(!ExchangeHigh->getType()->isPointerTy());
  assert(!ExchangeLow->getType()->isPointerTy());
  assert(ComparandPtr->getType()->isPointerTy());

  // For Release ordering, the failure ordering should be Monotonic.
  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
                             ? AtomicOrdering::Monotonic
                             : SuccessOrdering;

  // Convert to i128 pointers and values.
  llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128);
  llvm::Type *Int128PtrTy = Int128Ty->getPointerTo();
  Destination = CGF.Builder.CreateBitCast(Destination, Int128PtrTy);
  Address ComparandResult(CGF.Builder.CreateBitCast(ComparandPtr, Int128PtrTy),
                          CGF.getContext().toCharUnitsFromBits(128));

  // (((i128)hi) << 64) | ((i128)lo)
  ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty);
  ExchangeLow = CGF.Builder.CreateZExt(ExchangeLow, Int128Ty);
  ExchangeHigh =
      CGF.Builder.CreateShl(ExchangeHigh, llvm::ConstantInt::get(Int128Ty, 64));
  llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow);

  // Load the comparand for the instruction.
  llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandResult);

  auto *CXI = CGF.Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
                                              SuccessOrdering, FailureOrdering);

  // The atomic instruction is marked volatile for consistency with MSVC. This
  // blocks the few atomics optimizations that LLVM has. If we want to optimize
  // _Interlocked* operations in the future, we will have to remove the
  // volatile marker.
  CXI->setVolatile(true);

  // Store the result as an outparameter.
  CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0),
                          ComparandResult);

  // Get the success boolean and zero extend it to i8.
  Value *Success = CGF.Builder.CreateExtractValue(CXI, 1);
  return CGF.Builder.CreateZExt(Success, CGF.Int8Ty);
}
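
// Sketch: the two 64-bit halves are combined into a single i128 exchange
// value before the cmpxchg, e.g. for _InterlockedCompareExchange128:
//   %hi   = zext i64 %ExchangeHigh to i128
//   %lo   = zext i64 %ExchangeLow to i128
//   %shl  = shl i128 %hi, 64
//   %exch = or i128 %shl, %lo
// after which the old value is written back through _ComparandResult and
// the success bit is zero-extended to the i8 return value.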

static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  auto *Result = CGF.Builder.CreateAtomicRMW(
      AtomicRMWInst::Add,
      CGF.EmitScalarExpr(E->getArg(0)),
      ConstantInt::get(IntTy, 1),
      Ordering);
  return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
}

static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  auto *Result = CGF.Builder.CreateAtomicRMW(
      AtomicRMWInst::Sub,
      CGF.EmitScalarExpr(E->getArg(0)),
      ConstantInt::get(IntTy, 1),
      Ordering);
  return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
}
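
// Sketch: _InterlockedIncrement(&x) atomically adds 1 and then re-adds 1 to
// the returned old value, since the MSVC intrinsic returns the *new* value:
//   %old = atomicrmw add i32* %x, i32 1 seq_cst
//   %res = add i32 %old, 1
// (_InterlockedDecrement does the same with sub.)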

// Build a plain volatile load.
static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::Type *ITy =
      llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
  Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
  llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(Ptr, LoadSize);
  Load->setVolatile(true);
  return Load;
}

// Build a plain volatile store.
static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  Value *Value = CGF.EmitScalarExpr(E->getArg(1));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::Type *ITy =
      llvm::IntegerType::get(CGF.getLLVMContext(), StoreSize.getQuantity() * 8);
  Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
  llvm::StoreInst *Store =
      CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
  Store->setVolatile(true);
  return Store;
}
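
// Sketch: __iso_volatile_load32(p) becomes a plain volatile load such as
//   %v = load volatile i32, i32* %p, align 4
// with no atomic ordering attached; these builtins let code request plain
// ISO C++ volatile semantics even when MSVC's /volatile:ms model would
// otherwise add acquire/release behavior (a rationale, not spelled out in
// this file).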

// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type. Depending on mode, this may be a constrained
// floating-point intrinsic.
static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E, unsigned IntrinsicID,
                                unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, Src0);
  }
}

// Emit an intrinsic that has 2 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E, unsigned IntrinsicID,
                                unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, { Src0, Src1 });
  }
}

// Emit an intrinsic that has 3 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                 const CallExpr *E, unsigned IntrinsicID,
                                 unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
  }
}

// Emit an intrinsic where all operands are of the same type as the result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                                unsigned IntrinsicID,
                                                unsigned ConstrainedIntrinsicID,
                                                llvm::Type *Ty,
                                                ArrayRef<Value *> Args) {
  Function *F;
  if (CGF.Builder.getIsFPConstrained())
    F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Ty);
  else
    F = CGF.CGM.getIntrinsic(IntrinsicID, Ty);

  if (CGF.Builder.getIsFPConstrained())
    return CGF.Builder.CreateConstrainedFPCall(F, Args);
  else
    return CGF.Builder.CreateCall(F, Args);
}
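
// Sketch: in a constrained FP region (e.g. -ffp-exception-behavior=strict),
// a builtin like __builtin_sqrt(x) is lowered through the constrained
// variant, roughly:
//   %r = call double @llvm.experimental.constrained.sqrt.f64(
//            double %x, metadata !"round.dynamic", metadata !"fpexcept.strict")
// whereas in the default environment it is a plain call to @llvm.sqrt.f64.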

// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type.
static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
                               const CallExpr *E,
                               unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, Src0);
}

// Emit an intrinsic that has 2 operands of the same type as its result.
static Value *emitBinaryBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E,
                                unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, { Src0, Src1 });
}

// Emit an intrinsic that has 3 operands of the same type as its result.
static Value *emitTernaryBuiltin(CodeGenFunction &CGF,
                                 const CallExpr *E,
                                 unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
}

// Emit an intrinsic that has 1 float or double operand, and 1 integer.
static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
                               const CallExpr *E,
                               unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, {Src0, Src1});
}

// Emit an intrinsic that has an overloaded integer result and an fp operand.
static Value *
emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                                        unsigned IntrinsicID,
                                        unsigned ConstrainedIntrinsicID) {
  llvm::Type *ResultType = CGF.ConvertType(E->getType());
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
                                       {ResultType, Src0->getType()});
    return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
  } else {
    Function *F =
        CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
    return CGF.Builder.CreateCall(F, Src0);
  }
}

/// EmitFAbs - Emit a call to @llvm.fabs().
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
  Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
  llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
  Call->setDoesNotAccessMemory();
  return Call;
}

/// Emit the computation of the sign bit for a floating point value. Returns
/// the i1 sign bit value.
static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
  LLVMContext &C = CGF.CGM.getLLVMContext();

  llvm::Type *Ty = V->getType();
  int Width = Ty->getPrimitiveSizeInBits();
  llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
  V = CGF.Builder.CreateBitCast(V, IntTy);
  if (Ty->isPPC_FP128Ty()) {
    // We want the sign bit of the higher-order double. The bitcast we just
    // did works as if the double-double was stored to memory and then
    // read as an i128. The "store" will put the higher-order double in the
    // lower address in both little- and big-Endian modes, but the "load"
    // will treat those bits as a different part of the i128: the low bits in
    // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
    // we need to shift the high bits down to the low before truncating.
    Width >>= 1;
    if (CGF.getTarget().isBigEndian()) {
      Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
      V = CGF.Builder.CreateLShr(V, ShiftCst);
    }
    // We are truncating the value in order to extract the higher-order
    // double, which we will be using to extract the sign from.
    IntTy = llvm::IntegerType::get(C, Width);
    V = CGF.Builder.CreateTrunc(V, IntTy);
  }
  Value *Zero = llvm::Constant::getNullValue(IntTy);
  return CGF.Builder.CreateICmpSLT(V, Zero);
}
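
// Sketch: for an ordinary double this reduces to a bitcast plus a signed
// compare against zero:
//   %bits = bitcast double %v to i64
//   %sign = icmp slt i64 %bits, 0
// and for ppc_fp128 only the higher-order double (isolated by the
// shift/trunc sequence above) contributes the sign bit.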

static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
                              const CallExpr *E, llvm::Constant *calleeValue) {
  CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
  return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
}

/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
                                          const llvm::Intrinsic::ID IntrinsicID,
                                          llvm::Value *X, llvm::Value *Y,
                                          llvm::Value *&Carry) {
  // Make sure we have integers of the same width.
  assert(X->getType() == Y->getType() &&
         "Arguments must be the same type. (Did you forget to make sure both "
         "arguments have the same integer width?)");

  Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
  llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
  Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
  return CGF.Builder.CreateExtractValue(Tmp, 0);
}
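
// Sketch: with IntrinsicID == llvm::Intrinsic::sadd_with_overflow and i32
// operands, the emitted IR is roughly
//   %pair  = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
//   %sum   = extractvalue { i32, i1 } %pair, 0
//   %carry = extractvalue { i32, i1 } %pair, 1
// where %carry is returned through the Carry out-parameter.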

static Value *emitRangedBuiltin(CodeGenFunction &CGF,
                                unsigned IntrinsicID,
                                int low, int high) {
  llvm::MDBuilder MDHelper(CGF.getLLVMContext());
  llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high));
  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {});
  llvm::Instruction *Call = CGF.Builder.CreateCall(F);
  Call->setMetadata(llvm::LLVMContext::MD_range, RNode);
  return Call;
}

namespace {
  struct WidthAndSignedness {
    unsigned Width;
    bool Signed;
  };
}

static WidthAndSignedness
getIntegerWidthAndSignedness(const clang::ASTContext &context,
                             const clang::QualType Type) {
  assert(Type->isIntegerType() && "Given type is not an integer.");
  unsigned Width = Type->isBooleanType() ? 1
                   : Type->isExtIntType() ? context.getIntWidth(Type)
                                          : context.getTypeInfo(Type).Width;
  bool Signed = Type->isSignedIntegerType();
  return {Width, Signed};
}

// Given one or more integer types, this function produces an integer type that
// encompasses them: any value in one of the given types could be expressed in
// the encompassing type.
static struct WidthAndSignedness
EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
  assert(Types.size() > 0 && "Empty list of types.");

  // If any of the given types is signed, we must return a signed type.
  bool Signed = false;
  for (const auto &Type : Types) {
    Signed |= Type.Signed;
  }

  // The encompassing type must have a width greater than or equal to the width
  // of the specified types. Additionally, if the encompassing type is signed,
  // its width must be strictly greater than the width of any unsigned types
  // given.
  unsigned Width = 0;
  for (const auto &Type : Types) {
    unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
    if (Width < MinWidth) {
      Width = MinWidth;
    }
  }

  return {Width, Signed};
}
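
// Worked example: for {unsigned 32-bit, signed 16-bit} the result must be
// signed (one input is signed), and the width must be at least
// max(32 + 1, 16) = 33 so that every unsigned 32-bit value stays
// representable; the encompassing type is therefore a signed 33-bit integer.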

Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
  llvm::Type *DestType = Int8PtrTy;
  if (ArgValue->getType() != DestType)
    ArgValue =
        Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data());

  Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
  return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue);
}

/// Checks if using the result of __builtin_object_size(p, @p From) in place of
/// __builtin_object_size(p, @p To) is correct.
static bool areBOSTypesCompatible(int From, int To) {
  // Note: Our __builtin_object_size implementation currently treats Type=0 and
  // Type=2 identically. Encoding this implementation detail here may make
  // improving __builtin_object_size difficult in the future, so it's omitted.
  return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
}

static llvm::Value *
getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
  return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
}

llvm::Value *
CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
                                                 llvm::IntegerType *ResType,
                                                 llvm::Value *EmittedE,
                                                 bool IsDynamic) {
  uint64_t ObjectSize;
  if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
    return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
  return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
}

/// Returns a Value corresponding to the size of the given expression.
/// This Value may be either of the following:
///   - An llvm::Argument (if E is a param with the pass_object_size attribute
///     on it)
///   - A call to the @llvm.objectsize intrinsic
///
/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
/// and we wouldn't otherwise try to reference a pass_object_size parameter,
/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
llvm::Value *
CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
                                       llvm::IntegerType *ResType,
                                       llvm::Value *EmittedE, bool IsDynamic) {
  // We need to reference an argument if the pointer is a parameter with the
  // pass_object_size attribute.
  if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
    auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
    auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
    if (Param != nullptr && PS != nullptr &&
        areBOSTypesCompatible(PS->getType(), Type)) {
      auto Iter = SizeArguments.find(Param);
      assert(Iter != SizeArguments.end());

      const ImplicitParamDecl *D = Iter->second;
      auto DIter = LocalDeclMap.find(D);
      assert(DIter != LocalDeclMap.end());

      return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
                              getContext().getSizeType(), E->getBeginLoc());
    }
  }

  // LLVM can't handle Type=3 appropriately, and __builtin_object_size
  // shouldn't evaluate E for side-effects. In either case, we shouldn't lower
  // to @llvm.objectsize.
  if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
    return getDefaultBuiltinObjectSizeResult(Type, ResType);

  Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
  assert(Ptr->getType()->isPointerTy() &&
         "Non-pointer passed to __builtin_object_size?");

  Function *F =
      CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});

  // LLVM only supports 0 and 2, make sure that we pass along that as a
  // boolean.
  Value *Min = Builder.getInt1((Type & 2) != 0);
  // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
  Value *NullIsUnknown = Builder.getTrue();
  Value *Dynamic = Builder.getInt1(IsDynamic);
  return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
}
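
// Sketch: when no pass_object_size parameter applies,
// __builtin_object_size(p, 2) lowers to roughly
//   %size = call i64 @llvm.objectsize.i64.p0i8(i8* %p, i1 true, i1 true,
//                                              i1 false)
// where the i1 flags encode Min (Type & 2), NullIsUnknown, and Dynamic,
// respectively.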

namespace {
/// A struct to generically describe a bit test intrinsic.
struct BitTest {
  enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
  enum InterlockingKind : uint8_t {
    Unlocked,
    Sequential,
    Acquire,
    Release,
    NoFence
  };

  ActionKind Action;
  InterlockingKind Interlocking;
  bool Is64Bit;

  static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
};
} // namespace

BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
  switch (BuiltinID) {
  // Main portable variants.
  case Builtin::BI_bittest:
    return {TestOnly, Unlocked, false};
  case Builtin::BI_bittestandcomplement:
    return {Complement, Unlocked, false};
  case Builtin::BI_bittestandreset:
    return {Reset, Unlocked, false};
  case Builtin::BI_bittestandset:
    return {Set, Unlocked, false};
  case Builtin::BI_interlockedbittestandreset:
    return {Reset, Sequential, false};
  case Builtin::BI_interlockedbittestandset:
    return {Set, Sequential, false};

  // X86-specific 64-bit variants.
  case Builtin::BI_bittest64:
    return {TestOnly, Unlocked, true};
  case Builtin::BI_bittestandcomplement64:
    return {Complement, Unlocked, true};
  case Builtin::BI_bittestandreset64:
    return {Reset, Unlocked, true};
  case Builtin::BI_bittestandset64:
    return {Set, Unlocked, true};
  case Builtin::BI_interlockedbittestandreset64:
    return {Reset, Sequential, true};
  case Builtin::BI_interlockedbittestandset64:
    return {Set, Sequential, true};

  // ARM/AArch64-specific ordering variants.
  case Builtin::BI_interlockedbittestandset_acq:
    return {Set, Acquire, false};
  case Builtin::BI_interlockedbittestandset_rel:
    return {Set, Release, false};
  case Builtin::BI_interlockedbittestandset_nf:
    return {Set, NoFence, false};
  case Builtin::BI_interlockedbittestandreset_acq:
    return {Reset, Acquire, false};
  case Builtin::BI_interlockedbittestandreset_rel:
    return {Reset, Release, false};
  case Builtin::BI_interlockedbittestandreset_nf:
    return {Reset, NoFence, false};
  }
  llvm_unreachable("expected only bittest intrinsics");
}

static char bitActionToX86BTCode(BitTest::ActionKind A) {
  switch (A) {
  case BitTest::TestOnly:   return '\0';
  case BitTest::Complement: return 'c';
  case BitTest::Reset:      return 'r';
  case BitTest::Set:        return 's';
  }
  llvm_unreachable("invalid action");
}

static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
                                            BitTest BT,
                                            const CallExpr *E, Value *BitBase,
                                            Value *BitPos) {
  char Action = bitActionToX86BTCode(BT.Action);
  char SizeSuffix = BT.Is64Bit ? 'q' : 'l';

  // Build the assembly.
  SmallString<64> Asm;
  raw_svector_ostream AsmOS(Asm);
  if (BT.Interlocking != BitTest::Unlocked)
    AsmOS << "lock ";
  AsmOS << "bt";
  if (Action)
    AsmOS << Action;
  AsmOS << SizeSuffix << " $2, ($1)";

  // Build the constraints. FIXME: We should support immediates when possible.
  std::string Constraints = "={@ccc},r,r,~{cc},~{memory}";
  std::string MachineClobbers = CGF.getTarget().getClobbers();
  if (!MachineClobbers.empty()) {
    Constraints += ',';
    Constraints += MachineClobbers;
  }
  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(),
      CGF.getContext().getTypeSize(E->getArg(1)->getType()));
  llvm::Type *IntPtrType = IntType->getPointerTo();
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false);

  llvm::InlineAsm *IA =
      llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
  return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
}
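
// Sketch: for _interlockedbittestandset(p, n) this builds inline asm along
// the lines of
//   lock btsl $2, ($1)
// with constraints "={@ccc},r,r,~{cc},~{memory}", so the carry flag set by
// bt becomes the i8 result via the condition-code output constraint.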

static llvm::AtomicOrdering
getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
  switch (I) {
  case BitTest::Unlocked:   return llvm::AtomicOrdering::NotAtomic;
  case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
  case BitTest::Acquire:    return llvm::AtomicOrdering::Acquire;
  case BitTest::Release:    return llvm::AtomicOrdering::Release;
  case BitTest::NoFence:    return llvm::AtomicOrdering::Monotonic;
  }
  llvm_unreachable("invalid interlocking");
}

/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
/// bits and a bit position and read and optionally modify the bit at that
/// position. The position index can be arbitrarily large, i.e. it can be larger
/// than 31 or 63, so we need an indexed load in the general case.
static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
                                         unsigned BuiltinID,
                                         const CallExpr *E) {
  Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
  Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));

  BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);

  // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
  // indexing operation internally. Use them if possible.
  if (CGF.getTarget().getTriple().isX86())
    return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);

  // Otherwise, use generic code to load one byte and test the bit. Use all but
  // the bottom three bits as the array index, and the bottom three bits to
  // form a mask.
  // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
  Value *ByteIndex = CGF.Builder.CreateAShr(
      BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
  Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy);
  Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8,
                                                 ByteIndex, "bittest.byteaddr"),
                   CharUnits::One());
  Value *PosLow =
      CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
                            llvm::ConstantInt::get(CGF.Int8Ty, 0x7));

  // The updating instructions will need a mask.
  Value *Mask = nullptr;
  if (BT.Action != BitTest::TestOnly) {
    Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
                                 "bittest.mask");
  }

  // Check the action and ordering of the interlocked intrinsics.
  llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);

  Value *OldByte = nullptr;
  if (Ordering != llvm::AtomicOrdering::NotAtomic) {
    // Emit a combined atomicrmw load/store operation for the interlocked
    // intrinsics.
    llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
    if (BT.Action == BitTest::Reset) {
      Mask = CGF.Builder.CreateNot(Mask);
      RMWOp = llvm::AtomicRMWInst::And;
    }
    OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask,
                                          Ordering);
  } else {
    // Emit a plain load for the non-interlocked intrinsics.
    OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
    Value *NewByte = nullptr;
    switch (BT.Action) {
    case BitTest::TestOnly:
      // Don't store anything.
      break;
    case BitTest::Complement:
      NewByte = CGF.Builder.CreateXor(OldByte, Mask);
      break;
    case BitTest::Reset:
      NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
      break;
    case BitTest::Set:
      NewByte = CGF.Builder.CreateOr(OldByte, Mask);
      break;
    }
    if (NewByte)
      CGF.Builder.CreateStore(NewByte, ByteAddr);
  }

  // However we loaded the old byte, either by plain load or atomicrmw, shift
  // the bit into the low position and mask it to 0 or 1.
  Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
  return CGF.Builder.CreateAnd(
      ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
}
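
// Sketch of the generic (non-x86) path for _bittestandset(Base, Pos):
//   Byte = &BaseI8[Pos >> 3];
//   Mask = 1 << (Pos & 0x7);
//   Old  = *Byte;                  // atomicrmw or/and for interlocked forms
//   *Byte = Old | Mask;
//   return (Old >> (Pos & 0x7)) & 1;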

namespace {
enum class MSVCSetJmpKind {
  _setjmpex,
  _setjmp3,
  _setjmp
};
}

/// MSVC handles setjmp a bit differently on different platforms. On every
/// architecture except 32-bit x86, the frame address is passed. On x86, extra
/// parameters can be passed as variadic arguments, but we always pass none.
static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
                               const CallExpr *E) {
  llvm::Value *Arg1 = nullptr;
  llvm::Type *Arg1Ty = nullptr;
  StringRef Name;
  bool IsVarArg = false;
  if (SJKind == MSVCSetJmpKind::_setjmp3) {
    Name = "_setjmp3";
    Arg1Ty = CGF.Int32Ty;
    Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
    IsVarArg = true;
  } else {
    Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
    Arg1Ty = CGF.Int8PtrTy;
    if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
      Arg1 = CGF.Builder.CreateCall(
          CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
    } else
      Arg1 = CGF.Builder.CreateCall(
          CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
          llvm::ConstantInt::get(CGF.Int32Ty, 0));
  }

  // Mark the call site and declaration with ReturnsTwice.
  llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
  llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
      llvm::Attribute::ReturnsTwice);
  llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
      ReturnsTwiceAttr, /*Local=*/true);

  llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
      CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
  llvm::Value *Args[] = {Buf, Arg1};
  llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
  CB->setAttributes(ReturnsTwiceAttr);
  return RValue::get(CB);
}
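
// Sketch: on 32-bit x86, _setjmp(buf) is emitted as a returns_twice call to
//   _setjmp3(buf, 0)        // varargs; the extra-argument count is zero
// while other architectures call _setjmp/_setjmpex with the result of
// @llvm.frameaddress (or @llvm.sponentry on AArch64) as the second argument.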
1043
1044// Many of MSVC builtins are on x64, ARM and AArch64; to avoid repeating code,
1045// we handle them here.
1046enum class CodeGenFunction::MSVCIntrin {
1047 _BitScanForward,
1048 _BitScanReverse,
1049 _InterlockedAnd,
1050 _InterlockedDecrement,
1051 _InterlockedExchange,
1052 _InterlockedExchangeAdd,
1053 _InterlockedExchangeSub,
1054 _InterlockedIncrement,
1055 _InterlockedOr,
1056 _InterlockedXor,
1057 _InterlockedExchangeAdd_acq,
1058 _InterlockedExchangeAdd_rel,
1059 _InterlockedExchangeAdd_nf,
1060 _InterlockedExchange_acq,
1061 _InterlockedExchange_rel,
1062 _InterlockedExchange_nf,
1063 _InterlockedCompareExchange_acq,
1064 _InterlockedCompareExchange_rel,
1065 _InterlockedCompareExchange_nf,
1066 _InterlockedCompareExchange128,
1067 _InterlockedCompareExchange128_acq,
1068 _InterlockedCompareExchange128_rel,
1069 _InterlockedCompareExchange128_nf,
1070 _InterlockedOr_acq,
1071 _InterlockedOr_rel,
1072 _InterlockedOr_nf,
1073 _InterlockedXor_acq,
1074 _InterlockedXor_rel,
1075 _InterlockedXor_nf,
1076 _InterlockedAnd_acq,
1077 _InterlockedAnd_rel,
1078 _InterlockedAnd_nf,
1079 _InterlockedIncrement_acq,
1080 _InterlockedIncrement_rel,
1081 _InterlockedIncrement_nf,
1082 _InterlockedDecrement_acq,
1083 _InterlockedDecrement_rel,
1084 _InterlockedDecrement_nf,
1085 __fastfail,
1086};
1087
1088static Optional<CodeGenFunction::MSVCIntrin>
1089translateArmToMsvcIntrin(unsigned BuiltinID) {
1090 using MSVCIntrin = CodeGenFunction::MSVCIntrin;
1091 switch (BuiltinID) {
1092 default:
1093 return None;
1094 case ARM::BI_BitScanForward:
1095 case ARM::BI_BitScanForward64:
1096 return MSVCIntrin::_BitScanForward;
1097 case ARM::BI_BitScanReverse:
1098 case ARM::BI_BitScanReverse64:
1099 return MSVCIntrin::_BitScanReverse;
1100 case ARM::BI_InterlockedAnd64:
1101 return MSVCIntrin::_InterlockedAnd;
1102 case ARM::BI_InterlockedExchange64:
1103 return MSVCIntrin::_InterlockedExchange;
1104 case ARM::BI_InterlockedExchangeAdd64:
1105 return MSVCIntrin::_InterlockedExchangeAdd;
1106 case ARM::BI_InterlockedExchangeSub64:
1107 return MSVCIntrin::_InterlockedExchangeSub;
1108 case ARM::BI_InterlockedOr64:
1109 return MSVCIntrin::_InterlockedOr;
1110 case ARM::BI_InterlockedXor64:
1111 return MSVCIntrin::_InterlockedXor;
1112 case ARM::BI_InterlockedDecrement64:
1113 return MSVCIntrin::_InterlockedDecrement;
1114 case ARM::BI_InterlockedIncrement64:
1115 return MSVCIntrin::_InterlockedIncrement;
1116 case ARM::BI_InterlockedExchangeAdd8_acq:
1117 case ARM::BI_InterlockedExchangeAdd16_acq:
1118 case ARM::BI_InterlockedExchangeAdd_acq:
1119 case ARM::BI_InterlockedExchangeAdd64_acq:
1120 return MSVCIntrin::_InterlockedExchangeAdd_acq;
1121 case ARM::BI_InterlockedExchangeAdd8_rel:
1122 case ARM::BI_InterlockedExchangeAdd16_rel:
1123 case ARM::BI_InterlockedExchangeAdd_rel:
1124 case ARM::BI_InterlockedExchangeAdd64_rel:
1125 return MSVCIntrin::_InterlockedExchangeAdd_rel;
1126 case ARM::BI_InterlockedExchangeAdd8_nf:
1127 case ARM::BI_InterlockedExchangeAdd16_nf:
1128 case ARM::BI_InterlockedExchangeAdd_nf:
1129 case ARM::BI_InterlockedExchangeAdd64_nf:
1130 return MSVCIntrin::_InterlockedExchangeAdd_nf;
1131 case ARM::BI_InterlockedExchange8_acq:
1132 case ARM::BI_InterlockedExchange16_acq:
1133 case ARM::BI_InterlockedExchange_acq:
1134 case ARM::BI_InterlockedExchange64_acq:
1135 return MSVCIntrin::_InterlockedExchange_acq;
1136 case ARM::BI_InterlockedExchange8_rel:
1137 case ARM::BI_InterlockedExchange16_rel:
1138 case ARM::BI_InterlockedExchange_rel:
1139 case ARM::BI_InterlockedExchange64_rel:
1140 return MSVCIntrin::_InterlockedExchange_rel;
1141 case ARM::BI_InterlockedExchange8_nf:
1142 case ARM::BI_InterlockedExchange16_nf:
1143 case ARM::BI_InterlockedExchange_nf:
1144 case ARM::BI_InterlockedExchange64_nf:
1145 return MSVCIntrin::_InterlockedExchange_nf;
1146 case ARM::BI_InterlockedCompareExchange8_acq:
1147 case ARM::BI_InterlockedCompareExchange16_acq:
1148 case ARM::BI_InterlockedCompareExchange_acq:
1149 case ARM::BI_InterlockedCompareExchange64_acq:
1150 return MSVCIntrin::_InterlockedCompareExchange_acq;
1151 case ARM::BI_InterlockedCompareExchange8_rel:
1152 case ARM::BI_InterlockedCompareExchange16_rel:
1153 case ARM::BI_InterlockedCompareExchange_rel:
1154 case ARM::BI_InterlockedCompareExchange64_rel:
1155 return MSVCIntrin::_InterlockedCompareExchange_rel;
1156 case ARM::BI_InterlockedCompareExchange8_nf:
1157 case ARM::BI_InterlockedCompareExchange16_nf:
1158 case ARM::BI_InterlockedCompareExchange_nf:
1159 case ARM::BI_InterlockedCompareExchange64_nf:
1160 return MSVCIntrin::_InterlockedCompareExchange_nf;
1161 case ARM::BI_InterlockedOr8_acq:
1162 case ARM::BI_InterlockedOr16_acq:
1163 case ARM::BI_InterlockedOr_acq:
1164 case ARM::BI_InterlockedOr64_acq:
1165 return MSVCIntrin::_InterlockedOr_acq;
1166 case ARM::BI_InterlockedOr8_rel:
1167 case ARM::BI_InterlockedOr16_rel:
1168 case ARM::BI_InterlockedOr_rel:
1169 case ARM::BI_InterlockedOr64_rel:
1170 return MSVCIntrin::_InterlockedOr_rel;
1171 case ARM::BI_InterlockedOr8_nf:
1172 case ARM::BI_InterlockedOr16_nf:
1173 case ARM::BI_InterlockedOr_nf:
1174 case ARM::BI_InterlockedOr64_nf:
1175 return MSVCIntrin::_InterlockedOr_nf;
1176 case ARM::BI_InterlockedXor8_acq:
1177 case ARM::BI_InterlockedXor16_acq:
1178 case ARM::BI_InterlockedXor_acq:
1179 case ARM::BI_InterlockedXor64_acq:
1180 return MSVCIntrin::_InterlockedXor_acq;
1181 case ARM::BI_InterlockedXor8_rel:
1182 case ARM::BI_InterlockedXor16_rel:
1183 case ARM::BI_InterlockedXor_rel:
1184 case ARM::BI_InterlockedXor64_rel:
1185 return MSVCIntrin::_InterlockedXor_rel;
1186 case ARM::BI_InterlockedXor8_nf:
1187 case ARM::BI_InterlockedXor16_nf:
1188 case ARM::BI_InterlockedXor_nf:
1189 case ARM::BI_InterlockedXor64_nf:
1190 return MSVCIntrin::_InterlockedXor_nf;
1191 case ARM::BI_InterlockedAnd8_acq:
1192 case ARM::BI_InterlockedAnd16_acq:
1193 case ARM::BI_InterlockedAnd_acq:
1194 case ARM::BI_InterlockedAnd64_acq:
1195 return MSVCIntrin::_InterlockedAnd_acq;
1196 case ARM::BI_InterlockedAnd8_rel:
1197 case ARM::BI_InterlockedAnd16_rel:
1198 case ARM::BI_InterlockedAnd_rel:
1199 case ARM::BI_InterlockedAnd64_rel:
1200 return MSVCIntrin::_InterlockedAnd_rel;
1201 case ARM::BI_InterlockedAnd8_nf:
1202 case ARM::BI_InterlockedAnd16_nf:
1203 case ARM::BI_InterlockedAnd_nf:
1204 case ARM::BI_InterlockedAnd64_nf:
1205 return MSVCIntrin::_InterlockedAnd_nf;
1206 case ARM::BI_InterlockedIncrement16_acq:
1207 case ARM::BI_InterlockedIncrement_acq:
1208 case ARM::BI_InterlockedIncrement64_acq:
1209 return MSVCIntrin::_InterlockedIncrement_acq;
1210 case ARM::BI_InterlockedIncrement16_rel:
1211 case ARM::BI_InterlockedIncrement_rel:
1212 case ARM::BI_InterlockedIncrement64_rel:
1213 return MSVCIntrin::_InterlockedIncrement_rel;
1214 case ARM::BI_InterlockedIncrement16_nf:
1215 case ARM::BI_InterlockedIncrement_nf:
1216 case ARM::BI_InterlockedIncrement64_nf:
1217 return MSVCIntrin::_InterlockedIncrement_nf;
1218 case ARM::BI_InterlockedDecrement16_acq:
1219 case ARM::BI_InterlockedDecrement_acq:
1220 case ARM::BI_InterlockedDecrement64_acq:
1221 return MSVCIntrin::_InterlockedDecrement_acq;
1222 case ARM::BI_InterlockedDecrement16_rel:
1223 case ARM::BI_InterlockedDecrement_rel:
1224 case ARM::BI_InterlockedDecrement64_rel:
1225 return MSVCIntrin::_InterlockedDecrement_rel;
1226 case ARM::BI_InterlockedDecrement16_nf:
1227 case ARM::BI_InterlockedDecrement_nf:
1228 case ARM::BI_InterlockedDecrement64_nf:
1229 return MSVCIntrin::_InterlockedDecrement_nf;
1230 }
1231 llvm_unreachable("must return from switch");
1232}
1233
1234static Optional<CodeGenFunction::MSVCIntrin>
1235translateAarch64ToMsvcIntrin(unsigned BuiltinID) {
1236 using MSVCIntrin = CodeGenFunction::MSVCIntrin;
1237 switch (BuiltinID) {
1238 default:
1239 return None;
1240 case AArch64::BI_BitScanForward:
1241 case AArch64::BI_BitScanForward64:
1242 return MSVCIntrin::_BitScanForward;
1243 case AArch64::BI_BitScanReverse:
1244 case AArch64::BI_BitScanReverse64:
1245 return MSVCIntrin::_BitScanReverse;
1246 case AArch64::BI_InterlockedAnd64:
1247 return MSVCIntrin::_InterlockedAnd;
1248 case AArch64::BI_InterlockedExchange64:
1249 return MSVCIntrin::_InterlockedExchange;
1250 case AArch64::BI_InterlockedExchangeAdd64:
1251 return MSVCIntrin::_InterlockedExchangeAdd;
1252 case AArch64::BI_InterlockedExchangeSub64:
1253 return MSVCIntrin::_InterlockedExchangeSub;
1254 case AArch64::BI_InterlockedOr64:
1255 return MSVCIntrin::_InterlockedOr;
1256 case AArch64::BI_InterlockedXor64:
1257 return MSVCIntrin::_InterlockedXor;
1258 case AArch64::BI_InterlockedDecrement64:
1259 return MSVCIntrin::_InterlockedDecrement;
1260 case AArch64::BI_InterlockedIncrement64:
1261 return MSVCIntrin::_InterlockedIncrement;
1262 case AArch64::BI_InterlockedExchangeAdd8_acq:
1263 case AArch64::BI_InterlockedExchangeAdd16_acq:
1264 case AArch64::BI_InterlockedExchangeAdd_acq:
1265 case AArch64::BI_InterlockedExchangeAdd64_acq:
1266 return MSVCIntrin::_InterlockedExchangeAdd_acq;
1267 case AArch64::BI_InterlockedExchangeAdd8_rel:
1268 case AArch64::BI_InterlockedExchangeAdd16_rel:
1269 case AArch64::BI_InterlockedExchangeAdd_rel:
1270 case AArch64::BI_InterlockedExchangeAdd64_rel:
1271 return MSVCIntrin::_InterlockedExchangeAdd_rel;
1272 case AArch64::BI_InterlockedExchangeAdd8_nf:
1273 case AArch64::BI_InterlockedExchangeAdd16_nf:
1274 case AArch64::BI_InterlockedExchangeAdd_nf:
1275 case AArch64::BI_InterlockedExchangeAdd64_nf:
1276 return MSVCIntrin::_InterlockedExchangeAdd_nf;
1277 case AArch64::BI_InterlockedExchange8_acq:
1278 case AArch64::BI_InterlockedExchange16_acq:
1279 case AArch64::BI_InterlockedExchange_acq:
1280 case AArch64::BI_InterlockedExchange64_acq:
1281 return MSVCIntrin::_InterlockedExchange_acq;
1282 case AArch64::BI_InterlockedExchange8_rel:
1283 case AArch64::BI_InterlockedExchange16_rel:
1284 case AArch64::BI_InterlockedExchange_rel:
1285 case AArch64::BI_InterlockedExchange64_rel:
1286 return MSVCIntrin::_InterlockedExchange_rel;
1287 case AArch64::BI_InterlockedExchange8_nf:
1288 case AArch64::BI_InterlockedExchange16_nf:
1289 case AArch64::BI_InterlockedExchange_nf:
1290 case AArch64::BI_InterlockedExchange64_nf:
1291 return MSVCIntrin::_InterlockedExchange_nf;
1292 case AArch64::BI_InterlockedCompareExchange8_acq:
1293 case AArch64::BI_InterlockedCompareExchange16_acq:
1294 case AArch64::BI_InterlockedCompareExchange_acq:
1295 case AArch64::BI_InterlockedCompareExchange64_acq:
1296 return MSVCIntrin::_InterlockedCompareExchange_acq;
1297 case AArch64::BI_InterlockedCompareExchange8_rel:
1298 case AArch64::BI_InterlockedCompareExchange16_rel:
1299 case AArch64::BI_InterlockedCompareExchange_rel:
1300 case AArch64::BI_InterlockedCompareExchange64_rel:
1301 return MSVCIntrin::_InterlockedCompareExchange_rel;
1302 case AArch64::BI_InterlockedCompareExchange8_nf:
1303 case AArch64::BI_InterlockedCompareExchange16_nf:
1304 case AArch64::BI_InterlockedCompareExchange_nf:
1305 case AArch64::BI_InterlockedCompareExchange64_nf:
1306 return MSVCIntrin::_InterlockedCompareExchange_nf;
1307 case AArch64::BI_InterlockedCompareExchange128:
1308 return MSVCIntrin::_InterlockedCompareExchange128;
1309 case AArch64::BI_InterlockedCompareExchange128_acq:
1310 return MSVCIntrin::_InterlockedCompareExchange128_acq;
1311 case AArch64::BI_InterlockedCompareExchange128_nf:
1312 return MSVCIntrin::_InterlockedCompareExchange128_nf;
1313 case AArch64::BI_InterlockedCompareExchange128_rel:
1314 return MSVCIntrin::_InterlockedCompareExchange128_rel;
1315 case AArch64::BI_InterlockedOr8_acq:
1316 case AArch64::BI_InterlockedOr16_acq:
1317 case AArch64::BI_InterlockedOr_acq:
1318 case AArch64::BI_InterlockedOr64_acq:
1319 return MSVCIntrin::_InterlockedOr_acq;
1320 case AArch64::BI_InterlockedOr8_rel:
1321 case AArch64::BI_InterlockedOr16_rel:
1322 case AArch64::BI_InterlockedOr_rel:
1323 case AArch64::BI_InterlockedOr64_rel:
1324 return MSVCIntrin::_InterlockedOr_rel;
1325 case AArch64::BI_InterlockedOr8_nf:
1326 case AArch64::BI_InterlockedOr16_nf:
1327 case AArch64::BI_InterlockedOr_nf:
1328 case AArch64::BI_InterlockedOr64_nf:
1329 return MSVCIntrin::_InterlockedOr_nf;
1330 case AArch64::BI_InterlockedXor8_acq:
1331 case AArch64::BI_InterlockedXor16_acq:
1332 case AArch64::BI_InterlockedXor_acq:
1333 case AArch64::BI_InterlockedXor64_acq:
1334 return MSVCIntrin::_InterlockedXor_acq;
1335 case AArch64::BI_InterlockedXor8_rel:
1336 case AArch64::BI_InterlockedXor16_rel:
1337 case AArch64::BI_InterlockedXor_rel:
1338 case AArch64::BI_InterlockedXor64_rel:
1339 return MSVCIntrin::_InterlockedXor_rel;
1340 case AArch64::BI_InterlockedXor8_nf:
1341 case AArch64::BI_InterlockedXor16_nf:
1342 case AArch64::BI_InterlockedXor_nf:
1343 case AArch64::BI_InterlockedXor64_nf:
1344 return MSVCIntrin::_InterlockedXor_nf;
1345 case AArch64::BI_InterlockedAnd8_acq:
1346 case AArch64::BI_InterlockedAnd16_acq:
1347 case AArch64::BI_InterlockedAnd_acq:
1348 case AArch64::BI_InterlockedAnd64_acq:
1349 return MSVCIntrin::_InterlockedAnd_acq;
1350 case AArch64::BI_InterlockedAnd8_rel:
1351 case AArch64::BI_InterlockedAnd16_rel:
1352 case AArch64::BI_InterlockedAnd_rel:
1353 case AArch64::BI_InterlockedAnd64_rel:
1354 return MSVCIntrin::_InterlockedAnd_rel;
1355 case AArch64::BI_InterlockedAnd8_nf:
1356 case AArch64::BI_InterlockedAnd16_nf:
1357 case AArch64::BI_InterlockedAnd_nf:
1358 case AArch64::BI_InterlockedAnd64_nf:
1359 return MSVCIntrin::_InterlockedAnd_nf;
1360 case AArch64::BI_InterlockedIncrement16_acq:
1361 case AArch64::BI_InterlockedIncrement_acq:
1362 case AArch64::BI_InterlockedIncrement64_acq:
1363 return MSVCIntrin::_InterlockedIncrement_acq;
1364 case AArch64::BI_InterlockedIncrement16_rel:
1365 case AArch64::BI_InterlockedIncrement_rel:
1366 case AArch64::BI_InterlockedIncrement64_rel:
1367 return MSVCIntrin::_InterlockedIncrement_rel;
1368 case AArch64::BI_InterlockedIncrement16_nf:
1369 case AArch64::BI_InterlockedIncrement_nf:
1370 case AArch64::BI_InterlockedIncrement64_nf:
1371 return MSVCIntrin::_InterlockedIncrement_nf;
1372 case AArch64::BI_InterlockedDecrement16_acq:
1373 case AArch64::BI_InterlockedDecrement_acq:
1374 case AArch64::BI_InterlockedDecrement64_acq:
1375 return MSVCIntrin::_InterlockedDecrement_acq;
1376 case AArch64::BI_InterlockedDecrement16_rel:
1377 case AArch64::BI_InterlockedDecrement_rel:
1378 case AArch64::BI_InterlockedDecrement64_rel:
1379 return MSVCIntrin::_InterlockedDecrement_rel;
1380 case AArch64::BI_InterlockedDecrement16_nf:
1381 case AArch64::BI_InterlockedDecrement_nf:
1382 case AArch64::BI_InterlockedDecrement64_nf:
1383 return MSVCIntrin::_InterlockedDecrement_nf;
1384 }
1385 llvm_unreachable("must return from switch");
1386}
1387
1388static Optional<CodeGenFunction::MSVCIntrin>
1389translateX86ToMsvcIntrin(unsigned BuiltinID) {
1390 using MSVCIntrin = CodeGenFunction::MSVCIntrin;
1391 switch (BuiltinID) {
1392 default:
1393 return None;
1394 case clang::X86::BI_BitScanForward:
1395 case clang::X86::BI_BitScanForward64:
1396 return MSVCIntrin::_BitScanForward;
1397 case clang::X86::BI_BitScanReverse:
1398 case clang::X86::BI_BitScanReverse64:
1399 return MSVCIntrin::_BitScanReverse;
1400 case clang::X86::BI_InterlockedAnd64:
1401 return MSVCIntrin::_InterlockedAnd;
1402 case clang::X86::BI_InterlockedCompareExchange128:
1403 return MSVCIntrin::_InterlockedCompareExchange128;
1404 case clang::X86::BI_InterlockedExchange64:
1405 return MSVCIntrin::_InterlockedExchange;
1406 case clang::X86::BI_InterlockedExchangeAdd64:
1407 return MSVCIntrin::_InterlockedExchangeAdd;
1408 case clang::X86::BI_InterlockedExchangeSub64:
1409 return MSVCIntrin::_InterlockedExchangeSub;
1410 case clang::X86::BI_InterlockedOr64:
1411 return MSVCIntrin::_InterlockedOr;
1412 case clang::X86::BI_InterlockedXor64:
1413 return MSVCIntrin::_InterlockedXor;
1414 case clang::X86::BI_InterlockedDecrement64:
1415 return MSVCIntrin::_InterlockedDecrement;
1416 case clang::X86::BI_InterlockedIncrement64:
1417 return MSVCIntrin::_InterlockedIncrement;
1418 }
1419 llvm_unreachable("must return from switch");
1420}
1421
1422// Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated.
1423Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
1424 const CallExpr *E) {
1425 switch (BuiltinID) {
1426 case MSVCIntrin::_BitScanForward:
1427 case MSVCIntrin::_BitScanReverse: {
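    // Lower the MSVC bit-scan intrinsics to cttz/ctlz plus a small CFG. As a
    // rough sketch of the semantics being modeled (for a 32-bit Mask):
    //   unsigned char _BitScanForward(unsigned long *Index, unsigned long Mask) {
    //     if (Mask == 0) return 0;   // result PHI gets 0
    //     *Index = ctz(Mask);        // llvm.cttz
    //     return 1;                  // result PHI gets 1
    //   }
    // _BitScanReverse stores (BitWidth - 1) - clz(Mask) instead.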
1428 Address IndexAddress(EmitPointerWithAlignment(E->getArg(0)));
1429 Value *ArgValue = EmitScalarExpr(E->getArg(1));
1430
1431 llvm::Type *ArgType = ArgValue->getType();
1432 llvm::Type *IndexType =
1433 IndexAddress.getPointer()->getType()->getPointerElementType();
1434 llvm::Type *ResultType = ConvertType(E->getType());
1435
1436 Value *ArgZero = llvm::Constant::getNullValue(ArgType);
1437 Value *ResZero = llvm::Constant::getNullValue(ResultType);
1438 Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
1439
1440 BasicBlock *Begin = Builder.GetInsertBlock();
1441 BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
1442 Builder.SetInsertPoint(End);
1443 PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
1444
1445 Builder.SetInsertPoint(Begin);
1446 Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
1447 BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
1448 Builder.CreateCondBr(IsZero, End, NotZero);
1449 Result->addIncoming(ResZero, Begin);
1450
1451 Builder.SetInsertPoint(NotZero);
1452
1453 if (BuiltinID == MSVCIntrin::_BitScanForward) {
1454 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
1455 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1456 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1457 Builder.CreateStore(ZeroCount, IndexAddress, false);
1458 } else {
1459 unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
1460 Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
1461
1462 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
1463 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1464 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1465 Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
1466 Builder.CreateStore(Index, IndexAddress, false);
1467 }
1468 Builder.CreateBr(End);
1469 Result->addIncoming(ResOne, NotZero);
1470
1471 Builder.SetInsertPoint(End);
1472 return Result;
1473 }
1474 case MSVCIntrin::_InterlockedAnd:
1475 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
1476 case MSVCIntrin::_InterlockedExchange:
1477 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
1478 case MSVCIntrin::_InterlockedExchangeAdd:
1479 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
1480 case MSVCIntrin::_InterlockedExchangeSub:
1481 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
1482 case MSVCIntrin::_InterlockedOr:
1483 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
1484 case MSVCIntrin::_InterlockedXor:
1485 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
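  // In the cases below, the '_acq', '_rel', and '_nf' ("no fence") suffixes
  // map onto the LLVM atomic orderings Acquire, Release, and Monotonic
  // respectively; the unsuffixed forms above default to sequentially
  // consistent ordering.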
1486 case MSVCIntrin::_InterlockedExchangeAdd_acq:
1487 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1488 AtomicOrdering::Acquire);
1489 case MSVCIntrin::_InterlockedExchangeAdd_rel:
1490 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1491 AtomicOrdering::Release);
1492 case MSVCIntrin::_InterlockedExchangeAdd_nf:
1493 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1494 AtomicOrdering::Monotonic);
1495 case MSVCIntrin::_InterlockedExchange_acq:
1496 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1497 AtomicOrdering::Acquire);
1498 case MSVCIntrin::_InterlockedExchange_rel:
1499 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1500 AtomicOrdering::Release);
1501 case MSVCIntrin::_InterlockedExchange_nf:
1502 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1503 AtomicOrdering::Monotonic);
1504 case MSVCIntrin::_InterlockedCompareExchange_acq:
1505 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
1506 case MSVCIntrin::_InterlockedCompareExchange_rel:
1507 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
1508 case MSVCIntrin::_InterlockedCompareExchange_nf:
1509 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1510 case MSVCIntrin::_InterlockedCompareExchange128:
1511 return EmitAtomicCmpXchg128ForMSIntrin(
1512 *this, E, AtomicOrdering::SequentiallyConsistent);
1513 case MSVCIntrin::_InterlockedCompareExchange128_acq:
1514 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Acquire);
1515 case MSVCIntrin::_InterlockedCompareExchange128_rel:
1516 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Release);
1517 case MSVCIntrin::_InterlockedCompareExchange128_nf:
1518 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1519 case MSVCIntrin::_InterlockedOr_acq:
1520 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1521 AtomicOrdering::Acquire);
1522 case MSVCIntrin::_InterlockedOr_rel:
1523 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1524 AtomicOrdering::Release);
1525 case MSVCIntrin::_InterlockedOr_nf:
1526 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1527 AtomicOrdering::Monotonic);
1528 case MSVCIntrin::_InterlockedXor_acq:
1529 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1530 AtomicOrdering::Acquire);
1531 case MSVCIntrin::_InterlockedXor_rel:
1532 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1533 AtomicOrdering::Release);
1534 case MSVCIntrin::_InterlockedXor_nf:
1535 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1536 AtomicOrdering::Monotonic);
1537 case MSVCIntrin::_InterlockedAnd_acq:
1538 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1539 AtomicOrdering::Acquire);
1540 case MSVCIntrin::_InterlockedAnd_rel:
1541 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1542 AtomicOrdering::Release);
1543 case MSVCIntrin::_InterlockedAnd_nf:
1544 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1545 AtomicOrdering::Monotonic);
1546 case MSVCIntrin::_InterlockedIncrement_acq:
1547 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
1548 case MSVCIntrin::_InterlockedIncrement_rel:
1549 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
1550 case MSVCIntrin::_InterlockedIncrement_nf:
1551 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
1552 case MSVCIntrin::_InterlockedDecrement_acq:
1553 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
1554 case MSVCIntrin::_InterlockedDecrement_rel:
1555 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
1556 case MSVCIntrin::_InterlockedDecrement_nf:
1557 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);
1558
1559 case MSVCIntrin::_InterlockedDecrement:
1560 return EmitAtomicDecrementValue(*this, E);
1561 case MSVCIntrin::_InterlockedIncrement:
1562 return EmitAtomicIncrementValue(*this, E);
1563
1564 case MSVCIntrin::__fastfail: {
1565 // Request immediate process termination from the kernel. The instruction
1566 // sequences to do this are documented on MSDN:
1567 // https://msdn.microsoft.com/en-us/library/dn774154.aspx
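    // For example, on x86-64 a call to __fastfail(Code) lowers to roughly
    //   call void asm sideeffect "int $$0x29", "{cx}"(i32 %Code)
    // with the 'noreturn' attribute attached to the call.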
1568 llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
1569 StringRef Asm, Constraints;
1570 switch (ISA) {
1571 default:
1572 ErrorUnsupported(E, "__fastfail call for this architecture");
1573 break;
1574 case llvm::Triple::x86:
1575 case llvm::Triple::x86_64:
1576 Asm = "int $$0x29";
1577 Constraints = "{cx}";
1578 break;
1579 case llvm::Triple::thumb:
1580 Asm = "udf #251";
1581 Constraints = "{r0}";
1582 break;
1583 case llvm::Triple::aarch64:
1584 Asm = "brk #0xF003";
1585 Constraints = "{w0}";
1586 }
1587 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
1588 llvm::InlineAsm *IA =
1589 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
1590 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
1591 getLLVMContext(), llvm::AttributeList::FunctionIndex,
1592 llvm::Attribute::NoReturn);
1593 llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
1594 CI->setAttributes(NoReturnAttr);
1595 return CI;
1596 }
1597 }
1598 llvm_unreachable("Incorrect MSVC intrinsic!");
1599}
1600
1601namespace {
1602// ARC cleanup for __builtin_os_log_format
1603struct CallObjCArcUse final : EHScopeStack::Cleanup {
1604 CallObjCArcUse(llvm::Value *object) : object(object) {}
1605 llvm::Value *object;
1606
1607 void Emit(CodeGenFunction &CGF, Flags flags) override {
1608 CGF.EmitARCIntrinsicUse(object);
1609 }
1610};
} // namespace
1612
1613Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
1614 BuiltinCheckKind Kind) {
1615 assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero)
1616 && "Unsupported builtin check kind");
1617
1618 Value *ArgValue = EmitScalarExpr(E);
1619 if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef())
1620 return ArgValue;
1621
1622 SanitizerScope SanScope(this);
1623 Value *Cond = Builder.CreateICmpNE(
1624 ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
1625 EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin),
1626 SanitizerHandler::InvalidBuiltin,
1627 {EmitCheckSourceLocation(E->getExprLoc()),
1628 llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
1629 None);
1630 return ArgValue;
1631}
1632
1633/// Get the argument type for arguments to os_log_helper.
1634static CanQualType getOSLogArgType(ASTContext &C, int Size) {
1635 QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
1636 return C.getCanonicalType(UnsignedTy);
1637}
1638
1639llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
1640 const analyze_os_log::OSLogBufferLayout &Layout,
1641 CharUnits BufferAlignment) {
1642 ASTContext &Ctx = getContext();
1643
1644 llvm::SmallString<64> Name;
1645 {
1646 raw_svector_ostream OS(Name);
1647 OS << "__os_log_helper";
1648 OS << "_" << BufferAlignment.getQuantity();
1649 OS << "_" << int(Layout.getSummaryByte());
1650 OS << "_" << int(Layout.getNumArgsByte());
1651 for (const auto &Item : Layout.Items)
1652 OS << "_" << int(Item.getSizeByte()) << "_"
1653 << int(Item.getDescriptorByte());
1654 }
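  // For example, a buffer with 8-byte alignment, summary byte 0, one item,
  // and a single 4-byte argument with descriptor byte 0 yields the name
  // "__os_log_helper_8_0_1_4_0" (the exact values depend on the format
  // string; these are illustrative).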
1655
1656 if (llvm::Function *F = CGM.getModule().getFunction(Name))
1657 return F;
1658
1659 llvm::SmallVector<QualType, 4> ArgTys;
1660 FunctionArgList Args;
1661 Args.push_back(ImplicitParamDecl::Create(
1662 Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
1663 ImplicitParamDecl::Other));
1664 ArgTys.emplace_back(Ctx.VoidPtrTy);
1665
1666 for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
1667 char Size = Layout.Items[I].getSizeByte();
1668 if (!Size)
1669 continue;
1670
1671 QualType ArgTy = getOSLogArgType(Ctx, Size);
1672 Args.push_back(ImplicitParamDecl::Create(
1673 Ctx, nullptr, SourceLocation(),
1674 &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
1675 ImplicitParamDecl::Other));
1676 ArgTys.emplace_back(ArgTy);
1677 }
1678
1679 QualType ReturnTy = Ctx.VoidTy;
  QualType FunctionTy = Ctx.getFunctionType(ReturnTy, ArgTys, {});
1681
1682 // The helper function has linkonce_odr linkage to enable the linker to merge
1683 // identical functions. To ensure the merging always happens, 'noinline' is
1684 // attached to the function when compiling with -Oz.
1685 const CGFunctionInfo &FI =
1686 CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
1687 llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
1688 llvm::Function *Fn = llvm::Function::Create(
1689 FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
1690 Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
1691 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn);
1692 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
1693 Fn->setDoesNotThrow();
1694
1695 // Attach 'noinline' at -Oz.
1696 if (CGM.getCodeGenOpts().OptimizeSize == 2)
1697 Fn->addFnAttr(llvm::Attribute::NoInline);
1698
1699 auto NL = ApplyDebugLocation::CreateEmpty(*this);
1700 IdentifierInfo *II = &Ctx.Idents.get(Name);
1701 FunctionDecl *FD = FunctionDecl::Create(
1702 Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
      FunctionTy, nullptr, SC_PrivateExtern, false, false);
1704 // Avoid generating debug location info for the function.
1705 FD->setImplicit();
1706
1707 StartFunction(FD, ReturnTy, Fn, FI, Args);
1708
1709 // Create a scope with an artificial location for the body of this function.
1710 auto AL = ApplyDebugLocation::CreateArtificial(*this);
1711
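  // Serialize the layout into the buffer. The emitted format is
  //   [summary byte][numArgs byte]([descriptor byte][size byte][arg data])*
  // with each argument's payload stored at its running byte offset.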
1712 CharUnits Offset;
1713 Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"),
1714 BufferAlignment);
1715 Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
1716 Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
1717 Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
1718 Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));
1719
1720 unsigned I = 1;
1721 for (const auto &Item : Layout.Items) {
1722 Builder.CreateStore(
1723 Builder.getInt8(Item.getDescriptorByte()),
1724 Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
1725 Builder.CreateStore(
1726 Builder.getInt8(Item.getSizeByte()),
1727 Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));
1728
1729 CharUnits Size = Item.size();
1730 if (!Size.getQuantity())
1731 continue;
1732
1733 Address Arg = GetAddrOfLocalVar(Args[I]);
1734 Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
1735 Addr = Builder.CreateBitCast(Addr, Arg.getPointer()->getType(),
1736 "argDataCast");
1737 Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
1738 Offset += Size;
1739 ++I;
1740 }
1741
1742 FinishFunction();
1743
1744 return Fn;
1745}
1746
1747RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
1748 assert(E.getNumArgs() >= 2 &&
1749 "__builtin_os_log_format takes at least 2 arguments");
1750 ASTContext &Ctx = getContext();
1751 analyze_os_log::OSLogBufferLayout Layout;
1752 analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout);
1753 Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
1754 llvm::SmallVector<llvm::Value *, 4> RetainableOperands;
1755
1756 // Ignore argument 1, the format string. It is not currently used.
1757 CallArgList Args;
1758 Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy);
1759
1760 for (const auto &Item : Layout.Items) {
1761 int Size = Item.getSizeByte();
1762 if (!Size)
1763 continue;
1764
1765 llvm::Value *ArgVal;
1766
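    // A mask item packs its mask-type string into a little-endian integer:
    // byte I of the string occupies bits [8*I, 8*I+8) of a 64-bit constant.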
1767 if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
1768 uint64_t Val = 0;
1769 for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
1770 Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
1771 ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
1772 } else if (const Expr *TheExpr = Item.getExpr()) {
1773 ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
1774
1775 // If a temporary object that requires destruction after the full
1776 // expression is passed, push a lifetime-extended cleanup to extend its
1777 // lifetime to the end of the enclosing block scope.
1778 auto LifetimeExtendObject = [&](const Expr *E) {
1779 E = E->IgnoreParenCasts();
1780 // Extend lifetimes of objects returned by function calls and message
1781 // sends.
1782
1783 // FIXME: We should do this in other cases in which temporaries are
1784 // created including arguments of non-ARC types (e.g., C++
1785 // temporaries).
1786 if (isa<CallExpr>(E) || isa<ObjCMessageExpr>(E))
1787 return true;
1788 return false;
1789 };
1790
1791 if (TheExpr->getType()->isObjCRetainableType() &&
1792 getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
1793 assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
1794 "Only scalar can be a ObjC retainable type");
1795 if (!isa<Constant>(ArgVal)) {
1796 CleanupKind Cleanup = getARCCleanupKind();
1797 QualType Ty = TheExpr->getType();
1798 Address Alloca = Address::invalid();
1799 Address Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
1800 ArgVal = EmitARCRetain(Ty, ArgVal);
1801 Builder.CreateStore(ArgVal, Addr);
1802 pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
1803 CodeGenFunction::destroyARCStrongPrecise,
1804 Cleanup & EHCleanup);
1805
        // Push a clang.arc.use call to ensure the ARC optimizer knows that
        // the argument has to be kept alive.
1808 if (CGM.getCodeGenOpts().OptimizationLevel != 0)
1809 pushCleanupAfterFullExpr<CallObjCArcUse>(Cleanup, ArgVal);
1810 }
1811 }
1812 } else {
1813 ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
1814 }
1815
1816 unsigned ArgValSize =
1817 CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
1818 llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
1819 ArgValSize);
1820 ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
1821 CanQualType ArgTy = getOSLogArgType(Ctx, Size);
1822 // If ArgVal has type x86_fp80, zero-extend ArgVal.
1823 ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
1824 Args.add(RValue::get(ArgVal), ArgTy);
1825 }
1826
1827 const CGFunctionInfo &FI =
1828 CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
1829 llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
1830 Layout, BufAddr.getAlignment());
1831 EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);
1832 return RValue::get(BufAddr.getPointer());
1833}
1834
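/// Matches __builtin_mul_overflow with two unsigned operands and a signed
/// result of the same width, e.g.
///   unsigned a, b; int r;
///   __builtin_mul_overflow(a, b, &r);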
1835static bool isSpecialUnsignedMultiplySignedResult(
1836 unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info,
1837 WidthAndSignedness ResultInfo) {
1838 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
1839 Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width &&
1840 !Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed;
1841}
1842
1843static RValue EmitCheckedUnsignedMultiplySignedResult(
1844 CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info,
1845 const clang::Expr *Op2, WidthAndSignedness Op2Info,
1846 const clang::Expr *ResultArg, QualType ResultQTy,
1847 WidthAndSignedness ResultInfo) {
1848 assert(isSpecialUnsignedMultiplySignedResult(
1849 Builtin::BI__builtin_mul_overflow, Op1Info, Op2Info, ResultInfo) &&
1850 "Cannot specialize this multiply");
1851
1852 llvm::Value *V1 = CGF.EmitScalarExpr(Op1);
1853 llvm::Value *V2 = CGF.EmitScalarExpr(Op2);
1854
1855 llvm::Value *HasOverflow;
1856 llvm::Value *Result = EmitOverflowIntrinsic(
1857 CGF, llvm::Intrinsic::umul_with_overflow, V1, V2, HasOverflow);
1858
  // The intrinsic call will detect overflow when the value is > UINT_MAX.
  // However, since the original builtin had a signed result, we need to
  // report an overflow when the result is greater than INT_MAX.
1862 auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width);
1863 llvm::Value *IntMaxValue = llvm::ConstantInt::get(Result->getType(), IntMax);
1864
1865 llvm::Value *IntMaxOverflow = CGF.Builder.CreateICmpUGT(Result, IntMaxValue);
1866 HasOverflow = CGF.Builder.CreateOr(HasOverflow, IntMaxOverflow);
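  // For example, with 32-bit operands 0x80000000u * 1u does not overflow the
  // unsigned multiply, but the product exceeds INT_MAX, so only the extra
  // comparison above reports it.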
1867
1868 bool isVolatile =
1869 ResultArg->getType()->getPointeeType().isVolatileQualified();
1870 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
1871 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
1872 isVolatile);
1873 return RValue::get(HasOverflow);
1874}
1875
1876/// Determine if a binop is a checked mixed-sign multiply we can specialize.
1877static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
1878 WidthAndSignedness Op1Info,
1879 WidthAndSignedness Op2Info,
1880 WidthAndSignedness ResultInfo) {
1881 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
1882 std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
1883 Op1Info.Signed != Op2Info.Signed;
1884}
1885
1886/// Emit a checked mixed-sign multiply. This is a cheaper specialization of
1887/// the generic checked-binop irgen.
1888static RValue
1889EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
1890 WidthAndSignedness Op1Info, const clang::Expr *Op2,
1891 WidthAndSignedness Op2Info,
1892 const clang::Expr *ResultArg, QualType ResultQTy,
1893 WidthAndSignedness ResultInfo) {
1894 assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
1895 Op2Info, ResultInfo) &&
1896 "Not a mixed-sign multipliction we can specialize");
1897
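  // The strategy: compute |Signed| * Unsigned with a checked unsigned
  // multiply, then range-check and possibly negate the product. For example,
  // (-3) * 5u with a signed 32-bit result computes 15 unsigned, checks
  // 15 <= INT_MAX + 1 (the extra 1 because the negative range extends one
  // further), and negates to -15.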
1898 // Emit the signed and unsigned operands.
1899 const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
1900 const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
1901 llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
1902 llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
1903 unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
1904 unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;
1905
1906 // One of the operands may be smaller than the other. If so, [s|z]ext it.
1907 if (SignedOpWidth < UnsignedOpWidth)
1908 Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
1909 if (UnsignedOpWidth < SignedOpWidth)
1910 Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");
1911
1912 llvm::Type *OpTy = Signed->getType();
1913 llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
1914 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
1915 llvm::Type *ResTy = ResultPtr.getElementType();
1916 unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);
1917
1918 // Take the absolute value of the signed operand.
1919 llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
1920 llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
1921 llvm::Value *AbsSigned =
1922 CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);
1923
1924 // Perform a checked unsigned multiplication.
1925 llvm::Value *UnsignedOverflow;
1926 llvm::Value *UnsignedResult =
1927 EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned,
1928 Unsigned, UnsignedOverflow);
1929
1930 llvm::Value *Overflow, *Result;
1931 if (ResultInfo.Signed) {
    // Signed overflow occurs if the result is greater than INT_MAX or less
    // than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
1934 auto IntMax =
1935 llvm::APInt::getSignedMaxValue(ResultInfo.Width).zextOrSelf(OpWidth);
1936 llvm::Value *MaxResult =
1937 CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
1938 CGF.Builder.CreateZExt(IsNegative, OpTy));
1939 llvm::Value *SignedOverflow =
1940 CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
1941 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);
1942
1943 // Prepare the signed result (possibly by negating it).
1944 llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
1945 llvm::Value *SignedResult =
1946 CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
1947 Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
1948 } else {
1949 // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
1950 llvm::Value *Underflow = CGF.Builder.CreateAnd(
1951 IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
1952 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
1953 if (ResultInfo.Width < OpWidth) {
1954 auto IntMax =
1955 llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
1956 llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
1957 UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
1958 Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
1959 }
1960
1961 // Negate the product if it would be negative in infinite precision.
1962 Result = CGF.Builder.CreateSelect(
1963 IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);
1964
1965 Result = CGF.Builder.CreateTrunc(Result, ResTy);
1966 }
1967 assert(Overflow && Result && "Missing overflow or result");
1968
1969 bool isVolatile =
1970 ResultArg->getType()->getPointeeType().isVolatileQualified();
1971 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
1972 isVolatile);
1973 return RValue::get(Overflow);
1974}
1975
1976static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType,
1977 Value *&RecordPtr, CharUnits Align,
1978 llvm::FunctionCallee Func, int Lvl) {
1979 ASTContext &Context = CGF.getContext();
1980 RecordDecl *RD = RType->castAs<RecordType>()->getDecl()->getDefinition();
1981 std::string Pad = std::string(Lvl * 4, ' ');
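  // Emit a sequence of calls to the printf-like Func that renders the record.
  // As a sketch, for `struct S { int a; };` at the top level this emits
  // roughly:
  //   Func("struct S {\n"); Func("int a : "); Func("%d\n", <value of s.a>);
  //   Func("}\n");
  // accumulating the summed return values in Res.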
1982
1983 Value *GString =
1984 CGF.Builder.CreateGlobalStringPtr(RType.getAsString() + " {\n");
1985 Value *Res = CGF.Builder.CreateCall(Func, {GString});
1986
1987 static llvm::DenseMap<QualType, const char *> Types;
1988 if (Types.empty()) {
1989 Types[Context.CharTy] = "%c";
1990 Types[Context.BoolTy] = "%d";
1991 Types[Context.SignedCharTy] = "%hhd";
1992 Types[Context.UnsignedCharTy] = "%hhu";
1993 Types[Context.IntTy] = "%d";
1994 Types[Context.UnsignedIntTy] = "%u";
1995 Types[Context.LongTy] = "%ld";
1996 Types[Context.UnsignedLongTy] = "%lu";
1997 Types[Context.LongLongTy] = "%lld";
1998 Types[Context.UnsignedLongLongTy] = "%llu";
1999 Types[Context.ShortTy] = "%hd";
2000 Types[Context.UnsignedShortTy] = "%hu";
2001 Types[Context.VoidPtrTy] = "%p";
2002 Types[Context.FloatTy] = "%f";
2003 Types[Context.DoubleTy] = "%f";
2004 Types[Context.LongDoubleTy] = "%Lf";
2005 Types[Context.getPointerType(Context.CharTy)] = "%s";
2006 Types[Context.getPointerType(Context.getConstType(Context.CharTy))] = "%s";
2007 }
2008
2009 for (const auto *FD : RD->fields()) {
2010 Value *FieldPtr = RecordPtr;
2011 if (RD->isUnion())
2012 FieldPtr = CGF.Builder.CreatePointerCast(
2013 FieldPtr, CGF.ConvertType(Context.getPointerType(FD->getType())));
2014 else
2015 FieldPtr = CGF.Builder.CreateStructGEP(CGF.ConvertType(RType), FieldPtr,
2016 FD->getFieldIndex());
2017
2018 GString = CGF.Builder.CreateGlobalStringPtr(
2019 llvm::Twine(Pad)
2020 .concat(FD->getType().getAsString())
2021 .concat(llvm::Twine(' '))
2022 .concat(FD->getNameAsString())
2023 .concat(" : ")
2024 .str());
2025 Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
2026 Res = CGF.Builder.CreateAdd(Res, TmpRes);
2027
2028 QualType CanonicalType =
2029 FD->getType().getUnqualifiedType().getCanonicalType();
2030
    // If the field is itself a record, dump it recursively.
2032 if (CanonicalType->isRecordType()) {
2033 TmpRes = dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1);
2034 Res = CGF.Builder.CreateAdd(TmpRes, Res);
2035 continue;
2036 }
2037
    // Pick the best printf format for the field's type, defaulting to "%p".
2039 llvm::Twine Format = Types.find(CanonicalType) == Types.end()
2040 ? Types[Context.VoidPtrTy]
2041 : Types[CanonicalType];
2042
2043 Address FieldAddress = Address(FieldPtr, Align);
2044 FieldPtr = CGF.Builder.CreateLoad(FieldAddress);
2045
    // FIXME: Need to handle bit-fields here.
2047 GString = CGF.Builder.CreateGlobalStringPtr(
2048 Format.concat(llvm::Twine('\n')).str());
2049 TmpRes = CGF.Builder.CreateCall(Func, {GString, FieldPtr});
2050 Res = CGF.Builder.CreateAdd(Res, TmpRes);
2051 }
2052
2053 GString = CGF.Builder.CreateGlobalStringPtr(Pad + "}\n");
2054 Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
2055 Res = CGF.Builder.CreateAdd(Res, TmpRes);
2056 return Res;
2057}
2058
2059static bool
2060TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
2061 llvm::SmallPtrSetImpl<const Decl *> &Seen) {
2062 if (const auto *Arr = Ctx.getAsArrayType(Ty))
2063 Ty = Ctx.getBaseElementType(Arr);
2064
2065 const auto *Record = Ty->getAsCXXRecordDecl();
2066 if (!Record)
2067 return false;
2068
2069 // We've already checked this type, or are in the process of checking it.
2070 if (!Seen.insert(Record).second)
2071 return false;
2072
2073 assert(Record->hasDefinition() &&
2074 "Incomplete types should already be diagnosed");
2075
2076 if (Record->isDynamicClass())
2077 return true;
2078
2079 for (FieldDecl *F : Record->fields()) {
2080 if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
2081 return true;
2082 }
2083 return false;
2084}
2085
2086/// Determine if the specified type requires laundering by checking if it is a
2087/// dynamic class type or contains a subobject which is a dynamic class type.
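/// For example, under -fstrict-vtable-pointers a class with a vtable pointer
/// (or any field whose type transitively contains one) needs
/// __builtin_launder, while a plain aggregate of scalars does not.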
2088static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) {
2089 if (!CGM.getCodeGenOpts().StrictVTablePointers)
2090 return false;
2091 llvm::SmallPtrSet<const Decl *, 16> Seen;
2092 return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
2093}
2094
2095RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
2096 llvm::Value *Src = EmitScalarExpr(E->getArg(0));
2097 llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
2098
2099 // The builtin's shift arg may have a different type than the source arg and
2100 // result, but the LLVM intrinsic uses the same type for all values.
2101 llvm::Type *Ty = Src->getType();
2102 ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
2103
  // Rotate is a special case of LLVM funnel shift where the first two
  // arguments are the same.
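  // For example, __builtin_rotateleft32(X, N) becomes
  //   %r = call i32 @llvm.fshl.i32(i32 %X, i32 %X, i32 %N)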
2105 unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
2106 Function *F = CGM.getIntrinsic(IID, Ty);
2107 return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
2108}
2109
2110// Map math builtins for long-double to f128 version.
2111static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) {
2112 switch (BuiltinID) {
2113#define MUTATE_LDBL(func) \
2114 case Builtin::BI__builtin_##func##l: \
2115 return Builtin::BI__builtin_##func##f128;
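  // For example, MUTATE_LDBL(sqrt) rewrites Builtin::BI__builtin_sqrtl into
  // Builtin::BI__builtin_sqrtf128.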
2116 MUTATE_LDBL(sqrt)
2117 MUTATE_LDBL(cbrt)
2118 MUTATE_LDBL(fabs)
2119 MUTATE_LDBL(log)
2120 MUTATE_LDBL(log2)
2121 MUTATE_LDBL(log10)
2122 MUTATE_LDBL(log1p)
2123 MUTATE_LDBL(logb)
2124 MUTATE_LDBL(exp)
2125 MUTATE_LDBL(exp2)
2126 MUTATE_LDBL(expm1)
2127 MUTATE_LDBL(fdim)
2128 MUTATE_LDBL(hypot)
2129 MUTATE_LDBL(ilogb)
2130 MUTATE_LDBL(pow)
2131 MUTATE_LDBL(fmin)
2132 MUTATE_LDBL(fmax)
2133 MUTATE_LDBL(ceil)
2134 MUTATE_LDBL(trunc)
2135 MUTATE_LDBL(rint)
2136 MUTATE_LDBL(nearbyint)
2137 MUTATE_LDBL(round)
2138 MUTATE_LDBL(floor)
2139 MUTATE_LDBL(lround)
2140 MUTATE_LDBL(llround)
2141 MUTATE_LDBL(lrint)
2142 MUTATE_LDBL(llrint)
2143 MUTATE_LDBL(fmod)
2144 MUTATE_LDBL(modf)
2145 MUTATE_LDBL(nan)
2146 MUTATE_LDBL(nans)
2147 MUTATE_LDBL(inf)
2148 MUTATE_LDBL(fma)
2149 MUTATE_LDBL(sin)
2150 MUTATE_LDBL(cos)
2151 MUTATE_LDBL(tan)
2152 MUTATE_LDBL(sinh)
2153 MUTATE_LDBL(cosh)
2154 MUTATE_LDBL(tanh)
2155 MUTATE_LDBL(asin)
2156 MUTATE_LDBL(acos)
2157 MUTATE_LDBL(atan)
2158 MUTATE_LDBL(asinh)
2159 MUTATE_LDBL(acosh)
2160 MUTATE_LDBL(atanh)
2161 MUTATE_LDBL(atan2)
2162 MUTATE_LDBL(erf)
2163 MUTATE_LDBL(erfc)
2164 MUTATE_LDBL(ldexp)
2165 MUTATE_LDBL(frexp)
2166 MUTATE_LDBL(huge_val)
2167 MUTATE_LDBL(copysign)
2168 MUTATE_LDBL(nextafter)
2169 MUTATE_LDBL(nexttoward)
2170 MUTATE_LDBL(remainder)
2171 MUTATE_LDBL(remquo)
2172 MUTATE_LDBL(scalbln)
2173 MUTATE_LDBL(scalbn)
2174 MUTATE_LDBL(tgamma)
2175 MUTATE_LDBL(lgamma)
2176#undef MUTATE_LDBL
2177 default:
2178 return BuiltinID;
2179 }
2180}
2181
2182RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
2183 const CallExpr *E,
2184 ReturnValueSlot ReturnValue) {
2185 const FunctionDecl *FD = GD.getDecl()->getAsFunction();
2186 // See if we can constant fold this builtin. If so, don't emit it at all.
2187 Expr::EvalResult Result;
2188 if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
2189 !Result.hasSideEffects()) {
2190 if (Result.Val.isInt())
2191 return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
2192 Result.Val.getInt()));
2193 if (Result.Val.isFloat())
2194 return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
2195 Result.Val.getFloat()));
2196 }
2197
  // If the current long-double semantics are IEEE 128-bit, replace the
  // long-double math builtins with their f128 equivalents.
  // TODO: This mutation should also be applied to targets other than PPC,
  // once the backend supports IEEE 128-bit style libcalls.
2202 if (getTarget().getTriple().isPPC64() &&
2203 &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad())
2204 BuiltinID = mutateLongDoubleBuiltin(BuiltinID);
2205
2206 // If the builtin has been declared explicitly with an assembler label,
2207 // disable the specialized emitting below. Ideally we should communicate the
2208 // rename in IR, or at least avoid generating the intrinsic calls that are
2209 // likely to get lowered to the renamed library functions.
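  // For example, given a hypothetical declaration
  //   double sin(double) __asm__("my_sin");
  // emitting llvm.sin here would bypass the rename, so we fall back to a
  // plain library call instead.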
2210 const unsigned BuiltinIDIfNoAsmLabel =
2211 FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID;
2212
  // There are LLVM math intrinsics/instructions corresponding to math library
  // functions, except that the LLVM op will never set errno while the math
  // library might. Also, math builtins have the same semantics as their math
  // library twins. Thus, we can transform math library and builtin calls to
  // their LLVM counterparts if the call is marked 'const' (known to never set
  // errno).
2218 if (FD->hasAttr<ConstAttr>()) {
2219 switch (BuiltinIDIfNoAsmLabel) {
2220 case Builtin::BIceil:
2221 case Builtin::BIceilf:
2222 case Builtin::BIceill:
2223 case Builtin::BI__builtin_ceil:
2224 case Builtin::BI__builtin_ceilf:
2225 case Builtin::BI__builtin_ceilf16:
2226 case Builtin::BI__builtin_ceill:
2227 case Builtin::BI__builtin_ceilf128:
2228 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2229 Intrinsic::ceil,
2230 Intrinsic::experimental_constrained_ceil));
2231
2232 case Builtin::BIcopysign:
2233 case Builtin::BIcopysignf:
2234 case Builtin::BIcopysignl:
2235 case Builtin::BI__builtin_copysign:
2236 case Builtin::BI__builtin_copysignf:
2237 case Builtin::BI__builtin_copysignf16:
2238 case Builtin::BI__builtin_copysignl:
2239 case Builtin::BI__builtin_copysignf128:
2240 return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign));
2241
2242 case Builtin::BIcos:
2243 case Builtin::BIcosf:
2244 case Builtin::BIcosl:
2245 case Builtin::BI__builtin_cos:
2246 case Builtin::BI__builtin_cosf:
2247 case Builtin::BI__builtin_cosf16:
2248 case Builtin::BI__builtin_cosl:
2249 case Builtin::BI__builtin_cosf128:
2250 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2251 Intrinsic::cos,
2252 Intrinsic::experimental_constrained_cos));
2253
2254 case Builtin::BIexp:
2255 case Builtin::BIexpf:
2256 case Builtin::BIexpl:
2257 case Builtin::BI__builtin_exp:
2258 case Builtin::BI__builtin_expf:
2259 case Builtin::BI__builtin_expf16:
2260 case Builtin::BI__builtin_expl:
2261 case Builtin::BI__builtin_expf128:
2262 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2263 Intrinsic::exp,
2264 Intrinsic::experimental_constrained_exp));
2265
2266 case Builtin::BIexp2:
2267 case Builtin::BIexp2f:
2268 case Builtin::BIexp2l:
2269 case Builtin::BI__builtin_exp2:
2270 case Builtin::BI__builtin_exp2f:
2271 case Builtin::BI__builtin_exp2f16:
2272 case Builtin::BI__builtin_exp2l:
2273 case Builtin::BI__builtin_exp2f128:
2274 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2275 Intrinsic::exp2,
2276 Intrinsic::experimental_constrained_exp2));
2277
2278 case Builtin::BIfabs:
2279 case Builtin::BIfabsf:
2280 case Builtin::BIfabsl:
2281 case Builtin::BI__builtin_fabs:
2282 case Builtin::BI__builtin_fabsf:
2283 case Builtin::BI__builtin_fabsf16:
2284 case Builtin::BI__builtin_fabsl:
2285 case Builtin::BI__builtin_fabsf128:
2286 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs));
2287
2288 case Builtin::BIfloor:
2289 case Builtin::BIfloorf:
2290 case Builtin::BIfloorl:
2291 case Builtin::BI__builtin_floor:
2292 case Builtin::BI__builtin_floorf:
2293 case Builtin::BI__builtin_floorf16:
2294 case Builtin::BI__builtin_floorl:
2295 case Builtin::BI__builtin_floorf128:
2296 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2297 Intrinsic::floor,
2298 Intrinsic::experimental_constrained_floor));
2299
2300 case Builtin::BIfma:
2301 case Builtin::BIfmaf:
2302 case Builtin::BIfmal:
2303 case Builtin::BI__builtin_fma:
2304 case Builtin::BI__builtin_fmaf:
2305 case Builtin::BI__builtin_fmaf16:
2306 case Builtin::BI__builtin_fmal:
2307 case Builtin::BI__builtin_fmaf128:
2308 return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
2309 Intrinsic::fma,
2310 Intrinsic::experimental_constrained_fma));
2311
2312 case Builtin::BIfmax:
2313 case Builtin::BIfmaxf:
2314 case Builtin::BIfmaxl:
2315 case Builtin::BI__builtin_fmax:
2316 case Builtin::BI__builtin_fmaxf:
2317 case Builtin::BI__builtin_fmaxf16:
2318 case Builtin::BI__builtin_fmaxl:
2319 case Builtin::BI__builtin_fmaxf128:
2320 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2321 Intrinsic::maxnum,
2322 Intrinsic::experimental_constrained_maxnum));
2323
2324 case Builtin::BIfmin:
2325 case Builtin::BIfminf:
2326 case Builtin::BIfminl:
2327 case Builtin::BI__builtin_fmin:
2328 case Builtin::BI__builtin_fminf:
2329 case Builtin::BI__builtin_fminf16:
2330 case Builtin::BI__builtin_fminl:
2331 case Builtin::BI__builtin_fminf128:
2332 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2333 Intrinsic::minnum,
2334 Intrinsic::experimental_constrained_minnum));
2335
    // fmod() is a special case: it maps to the frem instruction rather than
    // an LLVM intrinsic.
2338 case Builtin::BIfmod:
2339 case Builtin::BIfmodf:
2340 case Builtin::BIfmodl:
2341 case Builtin::BI__builtin_fmod:
2342 case Builtin::BI__builtin_fmodf:
2343 case Builtin::BI__builtin_fmodf16:
2344 case Builtin::BI__builtin_fmodl:
2345 case Builtin::BI__builtin_fmodf128: {
2346 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2347 Value *Arg1 = EmitScalarExpr(E->getArg(0));
2348 Value *Arg2 = EmitScalarExpr(E->getArg(1));
2349 return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
2350 }
2351
2352 case Builtin::BIlog:
2353 case Builtin::BIlogf:
2354 case Builtin::BIlogl:
2355 case Builtin::BI__builtin_log:
2356 case Builtin::BI__builtin_logf:
2357 case Builtin::BI__builtin_logf16:
2358 case Builtin::BI__builtin_logl:
2359 case Builtin::BI__builtin_logf128:
2360 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2361 Intrinsic::log,
2362 Intrinsic::experimental_constrained_log));
2363
2364 case Builtin::BIlog10:
2365 case Builtin::BIlog10f:
2366 case Builtin::BIlog10l:
2367 case Builtin::BI__builtin_log10:
2368 case Builtin::BI__builtin_log10f:
2369 case Builtin::BI__builtin_log10f16:
2370 case Builtin::BI__builtin_log10l:
2371 case Builtin::BI__builtin_log10f128:
2372 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2373 Intrinsic::log10,
2374 Intrinsic::experimental_constrained_log10));
2375
2376 case Builtin::BIlog2:
2377 case Builtin::BIlog2f:
2378 case Builtin::BIlog2l:
2379 case Builtin::BI__builtin_log2:
2380 case Builtin::BI__builtin_log2f:
2381 case Builtin::BI__builtin_log2f16:
2382 case Builtin::BI__builtin_log2l:
2383 case Builtin::BI__builtin_log2f128:
2384 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2385 Intrinsic::log2,
2386 Intrinsic::experimental_constrained_log2));
2387
2388 case Builtin::BInearbyint:
2389 case Builtin::BInearbyintf:
2390 case Builtin::BInearbyintl:
2391 case Builtin::BI__builtin_nearbyint:
2392 case Builtin::BI__builtin_nearbyintf:
2393 case Builtin::BI__builtin_nearbyintl:
2394 case Builtin::BI__builtin_nearbyintf128:
2395 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2396 Intrinsic::nearbyint,
2397 Intrinsic::experimental_constrained_nearbyint));
2398
2399 case Builtin::BIpow:
2400 case Builtin::BIpowf:
2401 case Builtin::BIpowl:
2402 case Builtin::BI__builtin_pow:
2403 case Builtin::BI__builtin_powf:
2404 case Builtin::BI__builtin_powf16:
2405 case Builtin::BI__builtin_powl:
2406 case Builtin::BI__builtin_powf128:
2407 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2408 Intrinsic::pow,
2409 Intrinsic::experimental_constrained_pow));
2410
2411 case Builtin::BIrint:
2412 case Builtin::BIrintf:
2413 case Builtin::BIrintl:
2414 case Builtin::BI__builtin_rint:
2415 case Builtin::BI__builtin_rintf:
2416 case Builtin::BI__builtin_rintf16:
2417 case Builtin::BI__builtin_rintl:
2418 case Builtin::BI__builtin_rintf128:
2419 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2420 Intrinsic::rint,
2421 Intrinsic::experimental_constrained_rint));
2422
2423 case Builtin::BIround:
2424 case Builtin::BIroundf:
2425 case Builtin::BIroundl:
2426 case Builtin::BI__builtin_round:
2427 case Builtin::BI__builtin_roundf:
2428 case Builtin::BI__builtin_roundf16:
2429 case Builtin::BI__builtin_roundl:
2430 case Builtin::BI__builtin_roundf128:
2431 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2432 Intrinsic::round,
2433 Intrinsic::experimental_constrained_round));
2434
2435 case Builtin::BIsin:
2436 case Builtin::BIsinf:
2437 case Builtin::BIsinl:
2438 case Builtin::BI__builtin_sin:
2439 case Builtin::BI__builtin_sinf:
2440 case Builtin::BI__builtin_sinf16:
2441 case Builtin::BI__builtin_sinl:
2442 case Builtin::BI__builtin_sinf128:
2443 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2444 Intrinsic::sin,
2445 Intrinsic::experimental_constrained_sin));
2446
2447 case Builtin::BIsqrt:
2448 case Builtin::BIsqrtf:
2449 case Builtin::BIsqrtl:
2450 case Builtin::BI__builtin_sqrt:
2451 case Builtin::BI__builtin_sqrtf:
2452 case Builtin::BI__builtin_sqrtf16:
2453 case Builtin::BI__builtin_sqrtl:
2454 case Builtin::BI__builtin_sqrtf128:
2455 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2456 Intrinsic::sqrt,
2457 Intrinsic::experimental_constrained_sqrt));
2458
2459 case Builtin::BItrunc:
2460 case Builtin::BItruncf:
2461 case Builtin::BItruncl:
2462 case Builtin::BI__builtin_trunc:
2463 case Builtin::BI__builtin_truncf:
2464 case Builtin::BI__builtin_truncf16:
2465 case Builtin::BI__builtin_truncl:
2466 case Builtin::BI__builtin_truncf128:
2467 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2468 Intrinsic::trunc,
2469 Intrinsic::experimental_constrained_trunc));
2470
2471 case Builtin::BIlround:
2472 case Builtin::BIlroundf:
2473 case Builtin::BIlroundl:
2474 case Builtin::BI__builtin_lround:
2475 case Builtin::BI__builtin_lroundf:
2476 case Builtin::BI__builtin_lroundl:
2477 case Builtin::BI__builtin_lroundf128:
2478 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2479 *this, E, Intrinsic::lround,
2480 Intrinsic::experimental_constrained_lround));
2481
2482 case Builtin::BIllround:
2483 case Builtin::BIllroundf:
2484 case Builtin::BIllroundl:
2485 case Builtin::BI__builtin_llround:
2486 case Builtin::BI__builtin_llroundf:
2487 case Builtin::BI__builtin_llroundl:
2488 case Builtin::BI__builtin_llroundf128:
2489 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2490 *this, E, Intrinsic::llround,
2491 Intrinsic::experimental_constrained_llround));
2492
2493 case Builtin::BIlrint:
2494 case Builtin::BIlrintf:
2495 case Builtin::BIlrintl:
2496 case Builtin::BI__builtin_lrint:
2497 case Builtin::BI__builtin_lrintf:
2498 case Builtin::BI__builtin_lrintl:
2499 case Builtin::BI__builtin_lrintf128:
2500 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2501 *this, E, Intrinsic::lrint,
2502 Intrinsic::experimental_constrained_lrint));
2503
2504 case Builtin::BIllrint:
2505 case Builtin::BIllrintf:
2506 case Builtin::BIllrintl:
2507 case Builtin::BI__builtin_llrint:
2508 case Builtin::BI__builtin_llrintf:
2509 case Builtin::BI__builtin_llrintl:
2510 case Builtin::BI__builtin_llrintf128:
2511 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2512 *this, E, Intrinsic::llrint,
2513 Intrinsic::experimental_constrained_llrint));
2514
2515 default:
2516 break;
2517 }
2518 }
2519
2520 switch (BuiltinIDIfNoAsmLabel) {
2521 default: break;
2522 case Builtin::BI__builtin___CFStringMakeConstantString:
2523 case Builtin::BI__builtin___NSStringMakeConstantString:
2524 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
2525 case Builtin::BI__builtin_stdarg_start:
2526 case Builtin::BI__builtin_va_start:
2527 case Builtin::BI__va_start:
2528 case Builtin::BI__builtin_va_end:
2529 return RValue::get(
2530 EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
2531 ? EmitScalarExpr(E->getArg(0))
2532 : EmitVAListRef(E->getArg(0)).getPointer(),
2533 BuiltinID != Builtin::BI__builtin_va_end));
2534 case Builtin::BI__builtin_va_copy: {
2535 Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer();
2536 Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer();
2537
2538 llvm::Type *Type = Int8PtrTy;
2539
2540 DstPtr = Builder.CreateBitCast(DstPtr, Type);
2541 SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
2542 return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy),
2543 {DstPtr, SrcPtr}));
2544 }
2545 case Builtin::BI__builtin_abs:
2546 case Builtin::BI__builtin_labs:
2547 case Builtin::BI__builtin_llabs: {
2548 // X < 0 ? -X : X
2549 // The negation has 'nsw' because abs of INT_MIN is undefined.
2550 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2551 Value *NegOp = Builder.CreateNSWNeg(ArgValue, "neg");
2552 Constant *Zero = llvm::Constant::getNullValue(ArgValue->getType());
2553 Value *CmpResult = Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
2554 Value *Result = Builder.CreateSelect(CmpResult, NegOp, ArgValue, "abs");
2555 return RValue::get(Result);
2556 }
2557 case Builtin::BI__builtin_complex: {
2558 Value *Real = EmitScalarExpr(E->getArg(0));
2559 Value *Imag = EmitScalarExpr(E->getArg(1));
2560 return RValue::getComplex({Real, Imag});
2561 }
2562 case Builtin::BI__builtin_conj:
2563 case Builtin::BI__builtin_conjf:
2564 case Builtin::BI__builtin_conjl:
2565 case Builtin::BIconj:
2566 case Builtin::BIconjf:
2567 case Builtin::BIconjl: {
2568 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2569 Value *Real = ComplexVal.first;
2570 Value *Imag = ComplexVal.second;
2571 Imag = Builder.CreateFNeg(Imag, "neg");
2572 return RValue::getComplex(std::make_pair(Real, Imag));
2573 }
2574 case Builtin::BI__builtin_creal:
2575 case Builtin::BI__builtin_crealf:
2576 case Builtin::BI__builtin_creall:
2577 case Builtin::BIcreal:
2578 case Builtin::BIcrealf:
2579 case Builtin::BIcreall: {
2580 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2581 return RValue::get(ComplexVal.first);
2582 }
2583
2584 case Builtin::BI__builtin_dump_struct: {
2585 llvm::Type *LLVMIntTy = getTypes().ConvertType(getContext().IntTy);
2586 llvm::FunctionType *LLVMFuncType = llvm::FunctionType::get(
2587 LLVMIntTy, {llvm::Type::getInt8PtrTy(getLLVMContext())}, true);
2588
2589 Value *Func = EmitScalarExpr(E->getArg(1)->IgnoreImpCasts());
2590 CharUnits Arg0Align = EmitPointerWithAlignment(E->getArg(0)).getAlignment();
2591
2592 const Expr *Arg0 = E->getArg(0)->IgnoreImpCasts();
2593 QualType Arg0Type = Arg0->getType()->getPointeeType();
2594
2595 Value *RecordPtr = EmitScalarExpr(Arg0);
2596 Value *Res = dumpRecord(*this, Arg0Type, RecordPtr, Arg0Align,
2597 {LLVMFuncType, Func}, 0);
2598 return RValue::get(Res);
2599 }
2600
2601 case Builtin::BI__builtin_preserve_access_index: {
    // Only enable the preserved-access-index region when debug info is
    // available, since debug info is needed to preserve the user-level
    // access pattern.
2605 if (!getDebugInfo()) {
2606 CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
2607 return RValue::get(EmitScalarExpr(E->getArg(0)));
2608 }
2609
    // Nested builtin_preserve_access_index() calls are not supported.
2611 if (IsInPreservedAIRegion) {
2612 CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
2613 return RValue::get(EmitScalarExpr(E->getArg(0)));
2614 }
2615
2616 IsInPreservedAIRegion = true;
2617 Value *Res = EmitScalarExpr(E->getArg(0));
2618 IsInPreservedAIRegion = false;
2619 return RValue::get(Res);
2620 }
2621
2622 case Builtin::BI__builtin_cimag:
2623 case Builtin::BI__builtin_cimagf:
2624 case Builtin::BI__builtin_cimagl:
2625 case Builtin::BIcimag:
2626 case Builtin::BIcimagf:
2627 case Builtin::BIcimagl: {
2628 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2629 return RValue::get(ComplexVal.second);
2630 }
2631
2632 case Builtin::BI__builtin_clrsb:
2633 case Builtin::BI__builtin_clrsbl:
2634 case Builtin::BI__builtin_clrsbll: {
    // clrsb(x) -> clz(x < 0 ? ~x : x) - 1
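    // For a 32-bit argument: clrsb(0) == 31, clrsb(-1) == 31, clrsb(1) == 30.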
2636 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2637
2638 llvm::Type *ArgType = ArgValue->getType();
2639 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2640
2641 llvm::Type *ResultType = ConvertType(E->getType());
2642 Value *Zero = llvm::Constant::getNullValue(ArgType);
2643 Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
2644 Value *Inverse = Builder.CreateNot(ArgValue, "not");
2645 Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
2646 Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
2647 Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
2648 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2649 "cast");
2650 return RValue::get(Result);
2651 }
2652 case Builtin::BI__builtin_ctzs:
2653 case Builtin::BI__builtin_ctz:
2654 case Builtin::BI__builtin_ctzl:
2655 case Builtin::BI__builtin_ctzll: {
2656 Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero);
2657
2658 llvm::Type *ArgType = ArgValue->getType();
2659 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
2660
2661 llvm::Type *ResultType = ConvertType(E->getType());
2662 Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
2663 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
2664 if (Result->getType() != ResultType)
2665 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2666 "cast");
2667 return RValue::get(Result);
2668 }
2669 case Builtin::BI__builtin_clzs:
2670 case Builtin::BI__builtin_clz:
2671 case Builtin::BI__builtin_clzl:
2672 case Builtin::BI__builtin_clzll: {
2673 Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero);
2674
2675 llvm::Type *ArgType = ArgValue->getType();
2676 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2677
2678 llvm::Type *ResultType = ConvertType(E->getType());
2679 Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
2680 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
2681 if (Result->getType() != ResultType)
2682 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2683 "cast");
2684 return RValue::get(Result);
2685 }
2686 case Builtin::BI__builtin_ffs:
2687 case Builtin::BI__builtin_ffsl:
2688 case Builtin::BI__builtin_ffsll: {
2689 // ffs(x) -> x ? cttz(x) + 1 : 0
2690 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2691
2692 llvm::Type *ArgType = ArgValue->getType();
2693 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
2694
2695 llvm::Type *ResultType = ConvertType(E->getType());
2696 Value *Tmp =
2697 Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
2698 llvm::ConstantInt::get(ArgType, 1));
2699 Value *Zero = llvm::Constant::getNullValue(ArgType);
2700 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
2701 Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
2702 if (Result->getType() != ResultType)
2703 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2704 "cast");
2705 return RValue::get(Result);
2706 }
2707 case Builtin::BI__builtin_parity:
2708 case Builtin::BI__builtin_parityl:
2709 case Builtin::BI__builtin_parityll: {
2710 // parity(x) -> ctpop(x) & 1
2711 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2712
2713 llvm::Type *ArgType = ArgValue->getType();
2714 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
2715
2716 llvm::Type *ResultType = ConvertType(E->getType());
2717 Value *Tmp = Builder.CreateCall(F, ArgValue);
2718 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
2719 if (Result->getType() != ResultType)
2720 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2721 "cast");
2722 return RValue::get(Result);
2723 }
2724 case Builtin::BI__lzcnt16:
2725 case Builtin::BI__lzcnt:
2726 case Builtin::BI__lzcnt64: {
2727 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2728
2729 llvm::Type *ArgType = ArgValue->getType();
2730 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2731
2732 llvm::Type *ResultType = ConvertType(E->getType());
2733 Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
2734 if (Result->getType() != ResultType)
2735 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2736 "cast");
2737 return RValue::get(Result);
2738 }
2739 case Builtin::BI__popcnt16:
2740 case Builtin::BI__popcnt:
2741 case Builtin::BI__popcnt64:
2742 case Builtin::BI__builtin_popcount:
2743 case Builtin::BI__builtin_popcountl:
2744 case Builtin::BI__builtin_popcountll: {
2745 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2746
2747 llvm::Type *ArgType = ArgValue->getType();
2748 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
2749
2750 llvm::Type *ResultType = ConvertType(E->getType());
2751 Value *Result = Builder.CreateCall(F, ArgValue);
2752 if (Result->getType() != ResultType)
2753 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2754 "cast");
2755 return RValue::get(Result);
2756 }
2757 case Builtin::BI__builtin_unpredictable: {
2758 // Always return the argument of __builtin_unpredictable. LLVM does not
2759 // handle this builtin. Metadata for this builtin should be added directly
2760 // to instructions such as branches or switches that use it.
2761 return RValue::get(EmitScalarExpr(E->getArg(0)));
2762 }
2763 case Builtin::BI__builtin_expect: {
2764 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2765 llvm::Type *ArgType = ArgValue->getType();
2766
2767 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
    // Don't generate llvm.expect on -O0, as the backend won't use it for
    // anything.
    // Note that we still IRGen ExpectedValue because it could have side
    // effects.
2771 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2772 return RValue::get(ArgValue);
2773
2774 Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
2775 Value *Result =
2776 Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
2777 return RValue::get(Result);
2778 }
2779 case Builtin::BI__builtin_expect_with_probability: {
2780 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2781 llvm::Type *ArgType = ArgValue->getType();
2782
2783 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
2784 llvm::APFloat Probability(0.0);
2785 const Expr *ProbArg = E->getArg(2);
2786 bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext());
2787 assert(EvalSucceed && "probability should be able to evaluate as float");
2788 (void)EvalSucceed;
2789 bool LoseInfo = false;
2790 Probability.convert(llvm::APFloat::IEEEdouble(),
2791 llvm::RoundingMode::Dynamic, &LoseInfo);
2792 llvm::Type *Ty = ConvertType(ProbArg->getType());
2793 Constant *Confidence = ConstantFP::get(Ty, Probability);
    // Don't generate llvm.expect.with.probability on -O0, as the backend
    // won't use it for anything.
    // Note that we still IRGen ExpectedValue because it could have side
    // effects.
2797 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2798 return RValue::get(ArgValue);
2799
2800 Function *FnExpect =
2801 CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType);
2802 Value *Result = Builder.CreateCall(
2803 FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval");
2804 return RValue::get(Result);
2805 }
2806 case Builtin::BI__builtin_assume_aligned: {
2807 const Expr *Ptr = E->getArg(0);
2808 Value *PtrValue = EmitScalarExpr(Ptr);
2809 Value *OffsetValue =
2810 (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
2811
2812 Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
2813 ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
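// Alignments wider than LLVM's maximum supported alignment are clamped below
// rather than rejected, so the emitted assumption stays well-formed.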
2814 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
2815 AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
2816 llvm::Value::MaximumAlignment);
2817
2818 emitAlignmentAssumption(PtrValue, Ptr,
2819 /*The expr loc is sufficient.*/ SourceLocation(),
2820 AlignmentCI, OffsetValue);
2821 return RValue::get(PtrValue);
2822 }
2823 case Builtin::BI__assume:
2824 case Builtin::BI__builtin_assume: {
2825 if (E->getArg(0)->HasSideEffects(getContext()))
2826 return RValue::get(nullptr);
2827
2828 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2829 Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
2830 return RValue::get(Builder.CreateCall(FnAssume, ArgValue));
2831 }
2832 case Builtin::BI__builtin_bswap16:
2833 case Builtin::BI__builtin_bswap32:
2834 case Builtin::BI__builtin_bswap64: {
2835 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap));
2836 }
2837 case Builtin::BI__builtin_bitreverse8:
2838 case Builtin::BI__builtin_bitreverse16:
2839 case Builtin::BI__builtin_bitreverse32:
2840 case Builtin::BI__builtin_bitreverse64: {
2841 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse));
2842 }
2843 case Builtin::BI__builtin_rotateleft8:
2844 case Builtin::BI__builtin_rotateleft16:
2845 case Builtin::BI__builtin_rotateleft32:
2846 case Builtin::BI__builtin_rotateleft64:
2847 case Builtin::BI_rotl8: // Microsoft variants of rotate left
2848 case Builtin::BI_rotl16:
2849 case Builtin::BI_rotl:
2850 case Builtin::BI_lrotl:
2851 case Builtin::BI_rotl64:
2852 return emitRotate(E, false);
2853
2854 case Builtin::BI__builtin_rotateright8:
2855 case Builtin::BI__builtin_rotateright16:
2856 case Builtin::BI__builtin_rotateright32:
2857 case Builtin::BI__builtin_rotateright64:
2858 case Builtin::BI_rotr8: // Microsoft variants of rotate right
2859 case Builtin::BI_rotr16:
2860 case Builtin::BI_rotr:
2861 case Builtin::BI_lrotr:
2862 case Builtin::BI_rotr64:
2863 return emitRotate(E, true);
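// emitRotate lowers both directions to the funnel-shift intrinsics, e.g.
// (illustrative) a 32-bit rotate-left by %n becomes:
//   %0 = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %n)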
2864
2865 case Builtin::BI__builtin_constant_p: {
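// This lowers to the llvm.is.constant intrinsic so the answer can be decided
// after optimization, e.g. (illustrative) for an int argument:
//   %0 = call i1 @llvm.is.constant.i32(i32 %x)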
2866 llvm::Type *ResultType = ConvertType(E->getType());
2867
2868 const Expr *Arg = E->getArg(0);
2869 QualType ArgType = Arg->getType();
2870 // FIXME: The allowance for Obj-C pointers and block pointers is historical
2871 // and likely a mistake.
2872 if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
2873 !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
2874 // Per the GCC documentation, only numeric constants are recognized after
2875 // inlining.
2876 return RValue::get(ConstantInt::get(ResultType, 0));
2877
2878 if (Arg->HasSideEffects(getContext()))
2879 // The argument is unevaluated, so be conservative if it might have
2880 // side-effects.
2881 return RValue::get(ConstantInt::get(ResultType, 0));
2882
2883 Value *ArgValue = EmitScalarExpr(Arg);
2884 if (ArgType->isObjCObjectPointerType()) {
2885 // Convert Objective-C objects to id: the LLVM types for distinct Obj-C
2886 // classes are all opaque, so they cannot be told apart here.
2887 ArgType = CGM.getContext().getObjCIdType();
2888 ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
2889 }
2890 Function *F =
2891 CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
2892 Value *Result = Builder.CreateCall(F, ArgValue);
2893 if (Result->getType() != ResultType)
2894 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
2895 return RValue::get(Result);
2896 }
2897 case Builtin::BI__builtin_dynamic_object_size:
2898 case Builtin::BI__builtin_object_size: {
2899 unsigned Type =
2900 E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
2901 auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
2902
2903 // We pass this builtin onto the optimizer so that it can figure out the
2904 // object size in more complex cases.
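// E.g. (illustrative) __builtin_object_size(p, 0) becomes roughly:
//   %0 = call i64 @llvm.objectsize.i64.p0i8(i8* %p, i1 false, i1 true, i1 false)
// where the final i1 selects the dynamic variant.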
2905 bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
2906 return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
2907 /*EmittedE=*/nullptr, IsDynamic));
2908 }
2909 case Builtin::BI__builtin_prefetch: {
2910 Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
2911 // FIXME: Technically these constants should be of type 'int', yes?
2912 RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
2913 llvm::ConstantInt::get(Int32Ty, 0);
2914 Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
2915 llvm::ConstantInt::get(Int32Ty, 3);
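// The constant 1 below selects a data (rather than instruction) prefetch,
// which is all this builtin exposes.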
2916 Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
2917 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
2918 return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data}));
2919 }
2920 case Builtin::BI__builtin_readcyclecounter: {
2921 Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
2922 return RValue::get(Builder.CreateCall(F));
2923 }
2924 case Builtin::BI__builtin___clear_cache: {
2925 Value *Begin = EmitScalarExpr(E->getArg(0));
2926 Value *End = EmitScalarExpr(E->getArg(1));
2927 Function *F = CGM.getIntrinsic(Intrinsic::clear_cache);
2928 return RValue::get(Builder.CreateCall(F, {Begin, End}));
2929 }
2930 case Builtin::BI__builtin_trap:
2931 return RValue::get(EmitTrapCall(Intrinsic::trap));
2932 case Builtin::BI__debugbreak:
2933 return RValue::get(EmitTrapCall(Intrinsic::debugtrap));
2934 case Builtin::BI__builtin_unreachable: {
2935 EmitUnreachable(E->getExprLoc());
2936
2937 // We do need to preserve an insertion point.
2938 EmitBlock(createBasicBlock("unreachable.cont"));
2939
2940 return RValue::get(nullptr);
2941 }
2942
2943 case Builtin::BI__builtin_powi:
2944 case Builtin::BI__builtin_powif:
2945 case Builtin::BI__builtin_powil:
2946 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
2947 *this, E, Intrinsic::powi, Intrinsic::experimental_constrained_powi));
2948
2949 case Builtin::BI__builtin_isgreater:
2950 case Builtin::BI__builtin_isgreaterequal:
2951 case Builtin::BI__builtin_isless:
2952 case Builtin::BI__builtin_islessequal:
2953 case Builtin::BI__builtin_islessgreater:
2954 case Builtin::BI__builtin_isunordered: {
2955 // Ordered comparisons: we know the arguments to these are matching scalar
2956 // floating point values.
2957 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2958 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
2959 Value *LHS = EmitScalarExpr(E->getArg(0));
2960 Value *RHS = EmitScalarExpr(E->getArg(1));
2961
2962 switch (BuiltinID) {
2963 default: llvm_unreachable("Unknown ordered comparison");
2964 case Builtin::BI__builtin_isgreater:
2965 LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
2966 break;
2967 case Builtin::BI__builtin_isgreaterequal:
2968 LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
2969 break;
2970 case Builtin::BI__builtin_isless:
2971 LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
2972 break;
2973 case Builtin::BI__builtin_islessequal:
2974 LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
2975 break;
2976 case Builtin::BI__builtin_islessgreater:
2977 LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
2978 break;
2979 case Builtin::BI__builtin_isunordered:
2980 LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
2981 break;
2982 }
2983 // ZExt bool to int type.
2984 return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
2985 }
2986 case Builtin::BI__builtin_isnan: {
2987 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2988 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
2989 Value *V = EmitScalarExpr(E->getArg(0));
2990 V = Builder.CreateFCmpUNO(V, V, "cmp");
2991 return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
2992 }
2993
2994 case Builtin::BI__builtin_matrix_transpose: {
2995 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
2996 Value *MatValue = EmitScalarExpr(E->getArg(0));
2997 MatrixBuilder<CGBuilderTy> MB(Builder);
2998 Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
2999 MatrixTy->getNumColumns());
3000 return RValue::get(Result);
3001 }
3002
3003 case Builtin::BI__builtin_matrix_column_major_load: {
3004 MatrixBuilder<CGBuilderTy> MB(Builder);
3005 // Emit everything that isn't dependent on the first parameter type.
3006 Value *Stride = EmitScalarExpr(E->getArg(3));
3007 const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
3008 auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>();
3009 assert(PtrTy && "arg0 must be of pointer type");
3010 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
3011
3012 Address Src = EmitPointerWithAlignment(E->getArg(0));
3013 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(),
3014 E->getArg(0)->getExprLoc(), FD, 0);
3015 Value *Result = MB.CreateColumnMajorLoad(
3016 Src.getPointer(), Align(Src.getAlignment().getQuantity()), Stride,
3017 IsVolatile, ResultTy->getNumRows(), ResultTy->getNumColumns(),
3018 "matrix");
3019 return RValue::get(Result);
3020 }
3021
3022 case Builtin::BI__builtin_matrix_column_major_store: {
3023 MatrixBuilder<CGBuilderTy> MB(Builder);
3024 Value *Matrix = EmitScalarExpr(E->getArg(0));
3025 Address Dst = EmitPointerWithAlignment(E->getArg(1));
3026 Value *Stride = EmitScalarExpr(E->getArg(2));
3027
3028 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
3029 auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>();
3030 assert(PtrTy && "arg1 must be of pointer type");
3031 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
3032
3033 EmitNonNullArgCheck(RValue::get(Dst.getPointer()), E->getArg(1)->getType(),
3034 E->getArg(1)->getExprLoc(), FD, 0);
3035 Value *Result = MB.CreateColumnMajorStore(
3036 Matrix, Dst.getPointer(), Align(Dst.getAlignment().getQuantity()),
3037 Stride, IsVolatile, MatrixTy->getNumRows(), MatrixTy->getNumColumns());
3038 return RValue::get(Result);
3039 }
3040
3041 case Builtin::BIfinite:
3042 case Builtin::BI__finite:
3043 case Builtin::BIfinitef:
3044 case Builtin::BI__finitef:
3045 case Builtin::BIfinitel:
3046 case Builtin::BI__finitel:
3047 case Builtin::BI__builtin_isinf:
3048 case Builtin::BI__builtin_isfinite: {
3049 // isinf(x) --> fabs(x) == infinity
3050 // isfinite(x) --> fabs(x) != infinity
3051 // x != NaN via the ordered compare in either case.
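// E.g. (illustrative) for a double argument, isinf(x) becomes:
//   %fabs = call double @llvm.fabs.f64(double %x)
//   %cmpinf = fcmp oeq double %fabs, 0x7FF0000000000000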
3052 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3053 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
3054 Value *V = EmitScalarExpr(E->getArg(0));
3055 Value *Fabs = EmitFAbs(*this, V);
3056 Constant *Infinity = ConstantFP::getInfinity(V->getType());
3057 CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf)
3058 ? CmpInst::FCMP_OEQ
3059 : CmpInst::FCMP_ONE;
3060 Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf");
3061 return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType())));
3062 }
3063
3064 case Builtin::BI__builtin_isinf_sign: {
3065 // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
3066 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3067 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
3068 Value *Arg = EmitScalarExpr(E->getArg(0));
3069 Value *AbsArg = EmitFAbs(*this, Arg);
3070 Value *IsInf = Builder.CreateFCmpOEQ(
3071 AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
3072 Value *IsNeg = EmitSignBit(*this, Arg);
3073
3074 llvm::Type *IntTy = ConvertType(E->getType());
3075 Value *Zero = Constant::getNullValue(IntTy);
3076 Value *One = ConstantInt::get(IntTy, 1);
3077 Value *NegativeOne = ConstantInt::get(IntTy, -1);
3078 Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
3079 Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
3080 return RValue::get(Result);
3081 }
3082
3083 case Builtin::BI__builtin_isnormal: {
3084 // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
3085 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3086 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
3087 Value *V = EmitScalarExpr(E->getArg(0));
3088 Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
3089
3090 Value *Abs = EmitFAbs(*this, V);
3091 Value *IsLessThanInf =
3092 Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()), "isinf");
3093 APFloat Smallest = APFloat::getSmallestNormalized(
3094 getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
3095 Value *IsNormal =
3096 Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
3097 "isnormal");
3098 V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
3099 V = Builder.CreateAnd(V, IsNormal, "and");
3100 return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
3101 }
3102
3103 case Builtin::BI__builtin_flt_rounds: {
3104 Function *F = CGM.getIntrinsic(Intrinsic::flt_rounds);
3105
3106 llvm::Type *ResultType = ConvertType(E->getType());
3107 Value *Result = Builder.CreateCall(F);
3108 if (Result->getType() != ResultType)
3109 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3110 "cast");
3111 return RValue::get(Result);
3112 }
3113
3114 case Builtin::BI__builtin_fpclassify: {
3115 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3116 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
3117 Value *V = EmitScalarExpr(E->getArg(5));
3118 llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
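// The lowering below is a chain of compares feeding a single PHI, equivalent
// to (illustrative):
//   V == 0 ? FP_ZERO : V != V ? FP_NAN : fabs(V) == inf ? FP_INFINITE
//          : fabs(V) >= min_normal ? FP_NORMAL : FP_SUBNORMAL
// with the classification values taken from the first five arguments.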
3119
3120 // Create the result PHI up front; each classification block adds an incoming.
3121 BasicBlock *Begin = Builder.GetInsertBlock();
3122 BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
3123 Builder.SetInsertPoint(End);
3124 PHINode *Result =
3125 Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
3126 "fpclassify_result");
3127
3128 // if (V==0) return FP_ZERO
3129 Builder.SetInsertPoint(Begin);
3130 Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
3131 "iszero");
3132 Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
3133 BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
3134 Builder.CreateCondBr(IsZero, End, NotZero);
3135 Result->addIncoming(ZeroLiteral, Begin);
3136
3137 // if (V != V) return FP_NAN
3138 Builder.SetInsertPoint(NotZero);
3139 Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
3140 Value *NanLiteral = EmitScalarExpr(E->getArg(0));
3141 BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
3142 Builder.CreateCondBr(IsNan, End, NotNan);
3143 Result->addIncoming(NanLiteral, NotZero);
3144
3145 // if (fabs(V) == infinity) return FP_INFINITY
3146 Builder.SetInsertPoint(NotNan);
3147 Value *VAbs = EmitFAbs(*this, V);
3148 Value *IsInf =
3149 Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
3150 "isinf");
3151 Value *InfLiteral = EmitScalarExpr(E->getArg(1));
3152 BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
3153 Builder.CreateCondBr(IsInf, End, NotInf);
3154 Result->addIncoming(InfLiteral, NotNan);
3155
3156 // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
3157 Builder.SetInsertPoint(NotInf);
3158 APFloat Smallest = APFloat::getSmallestNormalized(
3159 getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
3160 Value *IsNormal =
3161 Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
3162 "isnormal");
3163 Value *NormalResult =
3164 Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
3165 EmitScalarExpr(E->getArg(3)));
3166 Builder.CreateBr(End);
3167 Result->addIncoming(NormalResult, NotInf);
3168
3169 // return Result
3170 Builder.SetInsertPoint(End);
3171 return RValue::get(Result);
3172 }
3173
3174 case Builtin::BIalloca:
3175 case Builtin::BI_alloca:
3176 case Builtin::BI__builtin_alloca: {
3177 Value *Size = EmitScalarExpr(E->getArg(0));
3178 const TargetInfo &TI = getContext().getTargetInfo();
3179 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
3180 const Align SuitableAlignmentInBytes =
3181 CGM.getContext()
3182 .toCharUnitsFromBits(TI.getSuitableAlign())
3183 .getAsAlign();
3184 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
3185 AI->setAlignment(SuitableAlignmentInBytes);
3186 initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
3187 return RValue::get(AI);
3188 }
3189
3190 case Builtin::BI__builtin_alloca_with_align: {
3191 Value *Size = EmitScalarExpr(E->getArg(0));
3192 Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
3193 auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
3194 unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
3195 const Align AlignmentInBytes =
3196 CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
3197 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
3198 AI->setAlignment(AlignmentInBytes);
3199 initializeAlloca(*this, AI, Size, AlignmentInBytes);
3200 return RValue::get(AI);
3201 }
3202
3203 case Builtin::BIbzero:
3204 case Builtin::BI__builtin_bzero: {
3205 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3206 Value *SizeVal = EmitScalarExpr(E->getArg(1));
3207 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3208 E->getArg(0)->getExprLoc(), FD, 0);
3209 Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
3210 return RValue::get(nullptr);
3211 }
3212 case Builtin::BImemcpy:
3213 case Builtin::BI__builtin_memcpy:
3214 case Builtin::BImempcpy:
3215 case Builtin::BI__builtin_mempcpy: {
3216 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3217 Address Src = EmitPointerWithAlignment(E->getArg(1));
3218 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3219 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3220 E->getArg(0)->getExprLoc(), FD, 0);
3221 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
3222 E->getArg(1)->getExprLoc(), FD, 1);
3223 Builder.CreateMemCpy(Dest, Src, SizeVal, false);
3224 if (BuiltinID == Builtin::BImempcpy ||
3225 BuiltinID == Builtin::BI__builtin_mempcpy)
3226 return RValue::get(Builder.CreateInBoundsGEP(Dest.getPointer(), SizeVal));
3227 else
3228 return RValue::get(Dest.getPointer());
3229 }
3230
3231 case Builtin::BI__builtin_memcpy_inline: {
3232 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3233 Address Src = EmitPointerWithAlignment(E->getArg(1));
3234 uint64_t Size =
3235 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
3236 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3237 E->getArg(0)->getExprLoc(), FD, 0);
3238 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
3239 E->getArg(1)->getExprLoc(), FD, 1);
3240 Builder.CreateMemCpyInline(Dest, Src, Size);
3241 return RValue::get(nullptr);
3242 }
3243
3244 case Builtin::BI__builtin_char_memchr:
3245 BuiltinID = Builtin::BI__builtin_memchr;
3246 break;
3247
3248 case Builtin::BI__builtin___memcpy_chk: {
3249 // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
3250 Expr::EvalResult SizeResult, DstSizeResult;
3251 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
3252 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
3253 break;
3254 llvm::APSInt Size = SizeResult.Val.getInt();
3255 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
3256 if (Size.ugt(DstSize))
3257 break;
3258 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3259 Address Src = EmitPointerWithAlignment(E->getArg(1));
3260 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
3261 Builder.CreateMemCpy(Dest, Src, SizeVal, false);
3262 return RValue::get(Dest.getPointer());
3263 }
3264
3265 case Builtin::BI__builtin_objc_memmove_collectable: {
3266 Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
3267 Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
3268 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3269 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
3270 DestAddr, SrcAddr, SizeVal);
3271 return RValue::get(DestAddr.getPointer());
3272 }
3273
3274 case Builtin::BI__builtin___memmove_chk: {
3275 // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
3276 Expr::EvalResult SizeResult, DstSizeResult;
3277 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
3278 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
3279 break;
3280 llvm::APSInt Size = SizeResult.Val.getInt();
3281 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
3282 if (Size.ugt(DstSize))
3283 break;
3284 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3285 Address Src = EmitPointerWithAlignment(E->getArg(1));
3286 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
3287 Builder.CreateMemMove(Dest, Src, SizeVal, false);
3288 return RValue::get(Dest.getPointer());
3289 }
3290
3291 case Builtin::BImemmove:
3292 case Builtin::BI__builtin_memmove: {
3293 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3294 Address Src = EmitPointerWithAlignment(E->getArg(1));
3295 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3296 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3297 E->getArg(0)->getExprLoc(), FD, 0);
3298 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
3299 E->getArg(1)->getExprLoc(), FD, 1);
3300 Builder.CreateMemMove(Dest, Src, SizeVal, false);
3301 return RValue::get(Dest.getPointer());
3302 }
3303 case Builtin::BImemset:
3304 case Builtin::BI__builtin_memset: {
3305 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3306 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
3307 Builder.getInt8Ty());
3308 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3309 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3310 E->getArg(0)->getExprLoc(), FD, 0);
3311 Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
3312 return RValue::get(Dest.getPointer());
3313 }
3314 case Builtin::BI__builtin___memset_chk: {
3315 // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
3316 Expr::EvalResult SizeResult, DstSizeResult;
3317 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
3318 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
3319 break;
3320 llvm::APSInt Size = SizeResult.Val.getInt();
3321 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
3322 if (Size.ugt(DstSize))
3323 break;
3324 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3325 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
3326 Builder.getInt8Ty());
3327 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
3328 Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
3329 return RValue::get(Dest.getPointer());
3330 }
3331 case Builtin::BI__builtin_wmemcmp: {
3332 // The MSVC runtime library does not provide a definition of wmemcmp, so we
3333 // need an inline implementation.
3334 if (!getTarget().getTriple().isOSMSVCRT())
3335 break;
3336
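// The emitted loop is equivalent to (illustrative):
//   for (; Size != 0; --Size, ++Dst, ++Src) {
//     if (*Dst > *Src) return 1;
//     if (*Dst < *Src) return -1;
//   }
//   return 0;
// with unsigned element comparisons, as the blocks below spell out.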
3337 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
3338
3339 Value *Dst = EmitScalarExpr(E->getArg(0));
3340 Value *Src = EmitScalarExpr(E->getArg(1));
3341 Value *Size = EmitScalarExpr(E->getArg(2));
3342
3343 BasicBlock *Entry = Builder.GetInsertBlock();
3344 BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
3345 BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
3346 BasicBlock *Next = createBasicBlock("wmemcmp.next");
3347 BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
3348 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
3349 Builder.CreateCondBr(SizeEq0, Exit, CmpGT);
3350
3351 EmitBlock(CmpGT);
3352 PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
3353 DstPhi->addIncoming(Dst, Entry);
3354 PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
3355 SrcPhi->addIncoming(Src, Entry);
3356 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
3357 SizePhi->addIncoming(Size, Entry);
3358 CharUnits WCharAlign =
3359 getContext().getTypeAlignInChars(getContext().WCharTy);
3360 Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
3361 Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
3362 Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
3363 Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);
3364
3365 EmitBlock(CmpLT);
3366 Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
3367 Builder.CreateCondBr(DstLtSrc, Exit, Next);
3368
3369 EmitBlock(Next);
3370 Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
3371 Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
3372 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
3373 Value *NextSizeEq0 =
3374 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
3375 Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
3376 DstPhi->addIncoming(NextDst, Next);
3377 SrcPhi->addIncoming(NextSrc, Next);
3378 SizePhi->addIncoming(NextSize, Next);
3379
3380 EmitBlock(Exit);
3381 PHINode *Ret = Builder.CreatePHI(IntTy, 4);
3382 Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
3383 Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
3384 Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT);
3385 Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
3386 return RValue::get(Ret);
3387 }
3388 case Builtin::BI__builtin_dwarf_cfa: {
3389 // The offset in bytes from the first argument to the CFA.
3390 //
3391 // Why on earth is this in the frontend? Is there any reason at
3392 // all that the backend can't reasonably determine this while
3393 // lowering llvm.eh.dwarf.cfa()?
3394 //
3395 // TODO: If there's a satisfactory reason, add a target hook for
3396 // this instead of hard-coding 0, which is correct for most targets.
3397 int32_t Offset = 0;
3398
3399 Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
3400 return RValue::get(Builder.CreateCall(F,
3401 llvm::ConstantInt::get(Int32Ty, Offset)));
3402 }
3403 case Builtin::BI__builtin_return_address: {
3404 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
3405 getContext().UnsignedIntTy);
3406 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
3407 return RValue::get(Builder.CreateCall(F, Depth));
3408 }
3409 case Builtin::BI_ReturnAddress: {
3410 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
3411 return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
3412 }
3413 case Builtin::BI__builtin_frame_address: {
3414 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
3415 getContext().UnsignedIntTy);
3416 Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
3417 return RValue::get(Builder.CreateCall(F, Depth));
3418 }
3419 case Builtin::BI__builtin_extract_return_addr: {
3420 Value *Address = EmitScalarExpr(E->getArg(0));
3421 Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
3422 return RValue::get(Result);
3423 }
3424 case Builtin::BI__builtin_frob_return_addr: {
3425 Value *Address = EmitScalarExpr(E->getArg(0));
3426 Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
3427 return RValue::get(Result);
3428 }
3429 case Builtin::BI__builtin_dwarf_sp_column: {
3430 llvm::IntegerType *Ty
3431 = cast<llvm::IntegerType>(ConvertType(E->getType()));
3432 int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
3433 if (Column == -1) {
3434 CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
3435 return RValue::get(llvm::UndefValue::get(Ty));
3436 }
3437 return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
3438 }
3439 case Builtin::BI__builtin_init_dwarf_reg_size_table: {
3440 Value *Address = EmitScalarExpr(E->getArg(0));
3441 if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
3442 CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
3443 return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
3444 }
3445 case Builtin::BI__builtin_eh_return: {
3446 Value *Int = EmitScalarExpr(E->getArg(0));
3447 Value *Ptr = EmitScalarExpr(E->getArg(1));
3448
3449 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
3450 assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
3451 "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
3452 Function *F =
3453 CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
3454 : Intrinsic::eh_return_i64);
3455 Builder.CreateCall(F, {Int, Ptr});
3456 Builder.CreateUnreachable();
3457
3458 // We do need to preserve an insertion point.
3459 EmitBlock(createBasicBlock("builtin_eh_return.cont"));
3460
3461 return RValue::get(nullptr);
3462 }
3463 case Builtin::BI__builtin_unwind_init: {
3464 Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
3465 return RValue::get(Builder.CreateCall(F));
3466 }
3467 case Builtin::BI__builtin_extend_pointer: {
3468 // Extends a pointer to the size of an _Unwind_Word, which is
3469 // uint64_t on all platforms. Generally this gets poked into a
3470 // register and eventually used as an address, so if the
3471 // addressing registers are wider than pointers and the platform
3472 // doesn't implicitly ignore high-order bits when doing
3473 // addressing, we need to make sure we zext / sext based on
3474 // the platform's expectations.
3475 //
3476 // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
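// For example (illustrative), on a 32-bit target this emits:
//   %extend.cast = ptrtoint i8* %ptr to i32
//   %extend.zext = zext i32 %extend.cast to i64   ; or sext, per target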
3477
3478 // Cast the pointer to intptr_t.
3479 Value *Ptr = EmitScalarExpr(E->getArg(0));
3480 Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
3481
3482 // If that's 64 bits, we're done.
3483 if (IntPtrTy->getBitWidth() == 64)
3484 return RValue::get(Result);
3485
3486 // Otherwise, ask the target hooks what to do.
3487 if (getTargetHooks().extendPointerWithSExt())
3488 return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
3489 else
3490 return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
3491 }
3492 case Builtin::BI__builtin_setjmp: {
3493 // Buffer is a void**.
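// Slot 0 receives the frame pointer and slot 2 the stack pointer below;
// slot 1 is left for llvm.eh.sjlj.setjmp's lowering to fill in.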
3494 Address Buf = EmitPointerWithAlignment(E->getArg(0));
3495
3496 // Store the frame pointer to the setjmp buffer.
3497 Value *FrameAddr = Builder.CreateCall(
3498 CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
3499 ConstantInt::get(Int32Ty, 0));
3500 Builder.CreateStore(FrameAddr, Buf);
3501
3502 // Store the stack pointer to the setjmp buffer.
3503 Value *StackAddr =
3504 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
3505 Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
3506 Builder.CreateStore(StackAddr, StackSaveSlot);
3507
3508 // Call LLVM's EH setjmp, which is lightweight.
3509 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
3510 Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
3511 return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
3512 }
3513 case Builtin::BI__builtin_longjmp: {
3514 Value *Buf = EmitScalarExpr(E->getArg(0));
3515 Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
3516
3517 // Call LLVM's EH longjmp, which is lightweight.
3518 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
3519
3520 // longjmp doesn't return; mark this as unreachable.
3521 Builder.CreateUnreachable();
3522
3523 // We do need to preserve an insertion point.
3524 EmitBlock(createBasicBlock("longjmp.cont"));
3525
3526 return RValue::get(nullptr);
3527 }
3528 case Builtin::BI__builtin_launder: {
3529 const Expr *Arg = E->getArg(0);
3530 QualType ArgTy = Arg->getType()->getPointeeType();
3531 Value *Ptr = EmitScalarExpr(Arg);
3532 if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
3533 Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
3534
3535 return RValue::get(Ptr);
3536 }
3537 case Builtin::BI__sync_fetch_and_add:
3538 case Builtin::BI__sync_fetch_and_sub:
3539 case Builtin::BI__sync_fetch_and_or:
3540 case Builtin::BI__sync_fetch_and_and:
3541 case Builtin::BI__sync_fetch_and_xor:
3542 case Builtin::BI__sync_fetch_and_nand:
3543 case Builtin::BI__sync_add_and_fetch:
3544 case Builtin::BI__sync_sub_and_fetch:
3545 case Builtin::BI__sync_and_and_fetch:
3546 case Builtin::BI__sync_or_and_fetch:
3547 case Builtin::BI__sync_xor_and_fetch:
3548 case Builtin::BI__sync_nand_and_fetch:
3549 case Builtin::BI__sync_val_compare_and_swap:
3550 case Builtin::BI__sync_bool_compare_and_swap:
3551 case Builtin::BI__sync_lock_test_and_set:
3552 case Builtin::BI__sync_lock_release:
3553 case Builtin::BI__sync_swap:
3554 llvm_unreachable("Shouldn't make it through sema");
3555 case Builtin::BI__sync_fetch_and_add_1:
3556 case Builtin::BI__sync_fetch_and_add_2:
3557 case Builtin::BI__sync_fetch_and_add_4:
3558 case Builtin::BI__sync_fetch_and_add_8:
3559 case Builtin::BI__sync_fetch_and_add_16:
3560 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
3561 case Builtin::BI__sync_fetch_and_sub_1:
3562 case Builtin::BI__sync_fetch_and_sub_2:
3563 case Builtin::BI__sync_fetch_and_sub_4:
3564 case Builtin::BI__sync_fetch_and_sub_8:
3565 case Builtin::BI__sync_fetch_and_sub_16:
3566 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
3567 case Builtin::BI__sync_fetch_and_or_1:
3568 case Builtin::BI__sync_fetch_and_or_2:
3569 case Builtin::BI__sync_fetch_and_or_4:
3570 case Builtin::BI__sync_fetch_and_or_8:
3571 case Builtin::BI__sync_fetch_and_or_16:
3572 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
3573 case Builtin::BI__sync_fetch_and_and_1:
3574 case Builtin::BI__sync_fetch_and_and_2:
3575 case Builtin::BI__sync_fetch_and_and_4:
3576 case Builtin::BI__sync_fetch_and_and_8:
3577 case Builtin::BI__sync_fetch_and_and_16:
3578 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
3579 case Builtin::BI__sync_fetch_and_xor_1:
3580 case Builtin::BI__sync_fetch_and_xor_2:
3581 case Builtin::BI__sync_fetch_and_xor_4:
3582 case Builtin::BI__sync_fetch_and_xor_8:
3583 case Builtin::BI__sync_fetch_and_xor_16:
3584 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
3585 case Builtin::BI__sync_fetch_and_nand_1:
3586 case Builtin::BI__sync_fetch_and_nand_2:
3587 case Builtin::BI__sync_fetch_and_nand_4:
3588 case Builtin::BI__sync_fetch_and_nand_8:
3589 case Builtin::BI__sync_fetch_and_nand_16:
3590 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
3591
3592 // Clang extensions: not overloaded yet.
3593 case Builtin::BI__sync_fetch_and_min:
3594 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
3595 case Builtin::BI__sync_fetch_and_max:
3596 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
3597 case Builtin::BI__sync_fetch_and_umin:
3598 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
3599 case Builtin::BI__sync_fetch_and_umax:
3600 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
3601
3602 case Builtin::BI__sync_add_and_fetch_1:
3603 case Builtin::BI__sync_add_and_fetch_2:
3604 case Builtin::BI__sync_add_and_fetch_4:
3605 case Builtin::BI__sync_add_and_fetch_8:
3606 case Builtin::BI__sync_add_and_fetch_16:
3607 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
3608 llvm::Instruction::Add);
3609 case Builtin::BI__sync_sub_and_fetch_1:
3610 case Builtin::BI__sync_sub_and_fetch_2:
3611 case Builtin::BI__sync_sub_and_fetch_4:
3612 case Builtin::BI__sync_sub_and_fetch_8:
3613 case Builtin::BI__sync_sub_and_fetch_16:
3614 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
3615 llvm::Instruction::Sub);
3616 case Builtin::BI__sync_and_and_fetch_1:
3617 case Builtin::BI__sync_and_and_fetch_2:
3618 case Builtin::BI__sync_and_and_fetch_4:
3619 case Builtin::BI__sync_and_and_fetch_8:
3620 case Builtin::BI__sync_and_and_fetch_16:
3621 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
3622 llvm::Instruction::And);
3623 case Builtin::BI__sync_or_and_fetch_1:
3624 case Builtin::BI__sync_or_and_fetch_2:
3625 case Builtin::BI__sync_or_and_fetch_4:
3626 case Builtin::BI__sync_or_and_fetch_8:
3627 case Builtin::BI__sync_or_and_fetch_16:
3628 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
3629 llvm::Instruction::Or);
3630 case Builtin::BI__sync_xor_and_fetch_1:
3631 case Builtin::BI__sync_xor_and_fetch_2:
3632 case Builtin::BI__sync_xor_and_fetch_4:
3633 case Builtin::BI__sync_xor_and_fetch_8:
3634 case Builtin::BI__sync_xor_and_fetch_16:
3635 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
3636 llvm::Instruction::Xor);
3637 case Builtin::BI__sync_nand_and_fetch_1:
3638 case Builtin::BI__sync_nand_and_fetch_2:
3639 case Builtin::BI__sync_nand_and_fetch_4:
3640 case Builtin::BI__sync_nand_and_fetch_8:
3641 case Builtin::BI__sync_nand_and_fetch_16:
3642 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
3643 llvm::Instruction::And, true);
3644
3645 case Builtin::BI__sync_val_compare_and_swap_1:
3646 case Builtin::BI__sync_val_compare_and_swap_2:
3647 case Builtin::BI__sync_val_compare_and_swap_4:
3648 case Builtin::BI__sync_val_compare_and_swap_8:
3649 case Builtin::BI__sync_val_compare_and_swap_16:
3650 return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));
3651
3652 case Builtin::BI__sync_bool_compare_and_swap_1:
3653 case Builtin::BI__sync_bool_compare_and_swap_2:
3654 case Builtin::BI__sync_bool_compare_and_swap_4:
3655 case Builtin::BI__sync_bool_compare_and_swap_8:
3656 case Builtin::BI__sync_bool_compare_and_swap_16:
3657 return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));
3658
3659 case Builtin::BI__sync_swap_1:
3660 case Builtin::BI__sync_swap_2:
3661 case Builtin::BI__sync_swap_4:
3662 case Builtin::BI__sync_swap_8:
3663 case Builtin::BI__sync_swap_16:
3664 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
3665
3666 case Builtin::BI__sync_lock_test_and_set_1:
3667 case Builtin::BI__sync_lock_test_and_set_2:
3668 case Builtin::BI__sync_lock_test_and_set_4:
3669 case Builtin::BI__sync_lock_test_and_set_8:
3670 case Builtin::BI__sync_lock_test_and_set_16:
3671 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
3672
3673 case Builtin::BI__sync_lock_release_1:
3674 case Builtin::BI__sync_lock_release_2:
3675 case Builtin::BI__sync_lock_release_4:
3676 case Builtin::BI__sync_lock_release_8:
3677 case Builtin::BI__sync_lock_release_16: {
3678 Value *Ptr = EmitScalarExpr(E->getArg(0));
3679 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
3680 CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
3681 llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
3682 StoreSize.getQuantity() * 8);
3683 Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
3684 llvm::StoreInst *Store =
3685 Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr,
3686 StoreSize);
3687 Store->setAtomic(llvm::AtomicOrdering::Release);
3688 return RValue::get(nullptr);
3689 }
3690
3691 case Builtin::BI__sync_synchronize: {
3692 // We assume this is supposed to correspond to a C++11-style
3693 // sequentially-consistent fence (i.e. this is only usable for
3694 // synchronization, not device I/O or anything like that). This intrinsic
3695 // is really badly designed in the sense that in theory, there isn't
3696 // any way to safely use it... but in practice, it mostly works
3697 // to use it with non-atomic loads and stores to get acquire/release
3698 // semantics.
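// i.e. (illustrative) this simply emits:
//   fence seq_cst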
3699 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
3700 return RValue::get(nullptr);
3701 }
3702
3703 case Builtin::BI__builtin_nontemporal_load:
3704 return RValue::get(EmitNontemporalLoad(*this, E));
3705 case Builtin::BI__builtin_nontemporal_store:
3706 return RValue::get(EmitNontemporalStore(*this, E));
3707 case Builtin::BI__c11_atomic_is_lock_free:
3708 case Builtin::BI__atomic_is_lock_free: {
3709 // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
3710 // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
3711 // _Atomic(T) is always properly-aligned.
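// E.g. (illustrative) the emitted call looks roughly like:
//   %0 = call zeroext i1 @__atomic_is_lock_free(i64 %size, i8* null)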
3712 const char *LibCallName = "__atomic_is_lock_free";
3713 CallArgList Args;
3714 Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
3715 getContext().getSizeType());
3716 if (BuiltinID == Builtin::BI__atomic_is_lock_free)
3717 Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
3718 getContext().VoidPtrTy);
3719 else
3720 Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
3721 getContext().VoidPtrTy);
3722 const CGFunctionInfo &FuncInfo =
3723 CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
3724 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
3725 llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
3726 return EmitCall(FuncInfo, CGCallee::forDirect(Func),
3727 ReturnValueSlot(), Args);
3728 }
3729
3730 case Builtin::BI__atomic_test_and_set: {
3731 // Look at the argument type (before the implicit conversion) to determine
3732 // whether this is a volatile operation; the parameter type is always volatile.
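// The operation itself is an i8-width exchange of the constant 1, e.g.
// (illustrative) for seq_cst:
//   %0 = atomicrmw volatile xchg i8* %ptr, i8 1 seq_cst
//   %tobool = icmp ne i8 %0, 0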
3733 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
3734 bool Volatile =
3735 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
3736
3737 Value *Ptr = EmitScalarExpr(E->getArg(0));
3738 unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
3739 Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
3740 Value *NewVal = Builder.getInt8(1);
3741 Value *Order = EmitScalarExpr(E->getArg(1));
3742 if (isa<llvm::ConstantInt>(Order)) {
3743 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3744 AtomicRMWInst *Result = nullptr;
3745 switch (ord) {
3746 case 0: // memory_order_relaxed
3747 default: // invalid order
3748 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3749 llvm::AtomicOrdering::Monotonic);
3750 break;
3751 case 1: // memory_order_consume
3752 case 2: // memory_order_acquire
3753 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3754 llvm::AtomicOrdering::Acquire);
3755 break;
3756 case 3: // memory_order_release
3757 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3758 llvm::AtomicOrdering::Release);
3759 break;
3760 case 4: // memory_order_acq_rel
3762 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3763 llvm::AtomicOrdering::AcquireRelease);
3764 break;
3765 case 5: // memory_order_seq_cst
3766 Result = Builder.CreateAtomicRMW(
3767 llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3768 llvm::AtomicOrdering::SequentiallyConsistent);
3769 break;
3770 }
3771 Result->setVolatile(Volatile);
3772 return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
3773 }
3774
3775 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3776
3777 llvm::BasicBlock *BBs[5] = {
3778 createBasicBlock("monotonic", CurFn),
3779 createBasicBlock("acquire", CurFn),
3780 createBasicBlock("release", CurFn),
3781 createBasicBlock("acqrel", CurFn),
3782 createBasicBlock("seqcst", CurFn)
3783 };
3784 llvm::AtomicOrdering Orders[5] = {
3785 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire,
3786 llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease,
3787 llvm::AtomicOrdering::SequentiallyConsistent};
3788
3789 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3790 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
3791
3792 Builder.SetInsertPoint(ContBB);
3793 PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
3794
3795 for (unsigned i = 0; i < 5; ++i) {
3796 Builder.SetInsertPoint(BBs[i]);
3797 AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
3798 Ptr, NewVal, Orders[i]);
3799 RMW->setVolatile(Volatile);
3800 Result->addIncoming(RMW, BBs[i]);
3801 Builder.CreateBr(ContBB);
3802 }
3803
3804 SI->addCase(Builder.getInt32(0), BBs[0]);
3805 SI->addCase(Builder.getInt32(1), BBs[1]);
3806 SI->addCase(Builder.getInt32(2), BBs[1]);
3807 SI->addCase(Builder.getInt32(3), BBs[2]);
3808 SI->addCase(Builder.getInt32(4), BBs[3]);
3809 SI->addCase(Builder.getInt32(5), BBs[4]);
3810
3811 Builder.SetInsertPoint(ContBB);
3812 return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
3813 }
3814
3815 case Builtin::BI__atomic_clear: {
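// __atomic_clear lowers to a plain atomic store of zero at i8 width, e.g.
// (illustrative) for release ordering:
//   store atomic volatile i8 0, i8* %ptr release, align 1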
3816 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
3817 bool Volatile =
3818 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
3819
3820 Address Ptr = EmitPointerWithAlignment(E->getArg(0));
3821 unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace();
3822 Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
3823 Value *NewVal = Builder.getInt8(0);
3824 Value *Order = EmitScalarExpr(E->getArg(1));
3825 if (isa<llvm::ConstantInt>(Order)) {
3826 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3827 StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
3828 switch (ord) {
3829 case 0: // memory_order_relaxed
3830 default: // invalid order
3831 Store->setOrdering(llvm::AtomicOrdering::Monotonic);
3832 break;
3833 case 3: // memory_order_release
3834 Store->setOrdering(llvm::AtomicOrdering::Release);
3835 break;
3836 case 5: // memory_order_seq_cst
3837 Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent);
3838 break;
3839 }
3840 return RValue::get(nullptr);
3841 }
3842
3843 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3844
3845 llvm::BasicBlock *BBs[3] = {
3846 createBasicBlock("monotonic", CurFn),
3847 createBasicBlock("release", CurFn),
3848 createBasicBlock("seqcst", CurFn)
3849 };
3850 llvm::AtomicOrdering Orders[3] = {
3851 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release,
3852 llvm::AtomicOrdering::SequentiallyConsistent};
3853
3854 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3855 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
3856
3857 for (unsigned i = 0; i < 3; ++i) {
3858 Builder.SetInsertPoint(BBs[i]);
3859 StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
3860 Store->setOrdering(Orders[i]);
3861 Builder.CreateBr(ContBB);
3862 }
3863
3864 SI->addCase(Builder.getInt32(0), BBs[0]);
3865 SI->addCase(Builder.getInt32(3), BBs[1]);
3866 SI->addCase(Builder.getInt32(5), BBs[2]);
3867
3868 Builder.SetInsertPoint(ContBB);
3869 return RValue::get(nullptr);
3870 }
3871
3872 case Builtin::BI__atomic_thread_fence:
3873 case Builtin::BI__atomic_signal_fence:
3874 case Builtin::BI__c11_atomic_thread_fence:
3875 case Builtin::BI__c11_atomic_signal_fence: {
3876 llvm::SyncScope::ID SSID;
3877 if (BuiltinID == Builtin::BI__atomic_signal_fence ||
3878 BuiltinID == Builtin::BI__c11_atomic_signal_fence)
3879 SSID = llvm::SyncScope::SingleThread;
3880 else
3881 SSID = llvm::SyncScope::System;
3882 Value *Order = EmitScalarExpr(E->getArg(0));
3883 if (isa<llvm::ConstantInt>(Order)) {
3884 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3885 switch (ord) {
3886 case 0: // memory_order_relaxed
3887 default: // invalid order
3888 break;
3889 case 1: // memory_order_consume
3890 case 2: // memory_order_acquire
3891 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
3892 break;
3893 case 3: // memory_order_release
3894 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
3895 break;
3896 case 4: // memory_order_acq_rel
3897 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
3898 break;
3899 case 5: // memory_order_seq_cst
3900 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
3901 break;
3902 }
3903 return RValue::get(nullptr);
3904 }
3905
3906 llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
3907 AcquireBB = createBasicBlock("acquire", CurFn);
3908 ReleaseBB = createBasicBlock("release", CurFn);
3909 AcqRelBB = createBasicBlock("acqrel", CurFn);
3910 SeqCstBB = createBasicBlock("seqcst", CurFn);
3911 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3912
3913 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3914 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
3915
3916 Builder.SetInsertPoint(AcquireBB);
3917 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
3918 Builder.CreateBr(ContBB);
3919 SI->addCase(Builder.getInt32(1), AcquireBB);
3920 SI->addCase(Builder.getInt32(2), AcquireBB);
3921
3922 Builder.SetInsertPoint(ReleaseBB);
3923 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
3924 Builder.CreateBr(ContBB);
3925 SI->addCase(Builder.getInt32(3), ReleaseBB);
3926
3927 Builder.SetInsertPoint(AcqRelBB);
3928 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
3929 Builder.CreateBr(ContBB);
3930 SI->addCase(Builder.getInt32(4), AcqRelBB);
3931
3932 Builder.SetInsertPoint(SeqCstBB);
3933 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
3934 Builder.CreateBr(ContBB);
3935 SI->addCase(Builder.getInt32(5), SeqCstBB);
3936
3937 Builder.SetInsertPoint(ContBB);
3938 return RValue::get(nullptr);
3939 }
3940
3941 case Builtin::BI__builtin_signbit:
3942 case Builtin::BI__builtin_signbitf:
3943 case Builtin::BI__builtin_signbitl: {
3944 return RValue::get(
3945 Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
3946 ConvertType(E->getType())));
3947 }
3948 case Builtin::BI__warn_memset_zero_len:
3949 return RValue::getIgnored();
3950 case Builtin::BI__annotation: {
3951 // Re-encode each wide string to UTF8 and make an MDString.
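// E.g. (illustrative) __annotation(L"tag") ends up as:
//   call void @llvm.codeview.annotation(metadata !{!"tag"})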
3952 SmallVector<Metadata *, 1> Strings;
3953 for (const Expr *Arg : E->arguments()) {
3954 const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
3955 assert(Str->getCharByteWidth() == 2);
3956 StringRef WideBytes = Str->getBytes();
3957 std::string StrUtf8;
3958 if (!convertUTF16ToUTF8String(
3959 makeArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
3960 CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
3961 continue;
3962 }
3963 Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
3964 }
3965
3966 // Build an MDTuple of MDStrings and emit the intrinsic call.
3967 llvm::Function *F =
3968 CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {});
3969 MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
3970 Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
3971 return RValue::getIgnored();
3972 }
3973 case Builtin::BI__builtin_annotation: {
3974 llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
3975 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
3976 AnnVal->getType());
3977
3978 // Get the annotation string, looking through casts. Sema requires this to be
3979 // a non-wide string literal, potentially cast, so the cast<> is safe.
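// E.g. (illustrative) __builtin_annotation(x, "note") for an int becomes:
//   %0 = call i32 @llvm.annotation.i32(i32 %x, i8* <"note">, i8* <file>,
//                                      i32 <line>)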
3980 const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
3981 StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
3982 return RValue::get(
3983 EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr));
3984 }
3985 case Builtin::BI__builtin_addcb:
3986 case Builtin::BI__builtin_addcs:
3987 case Builtin::BI__builtin_addc:
3988 case Builtin::BI__builtin_addcl:
3989 case Builtin::BI__builtin_addcll:
3990 case Builtin::BI__builtin_subcb:
3991 case Builtin::BI__builtin_subcs:
3992 case Builtin::BI__builtin_subc:
3993 case Builtin::BI__builtin_subcl:
3994 case Builtin::BI__builtin_subcll: {
3995
3996 // We translate all of these builtins from expressions of the form:
3997 // int x = ..., y = ..., carryin = ..., carryout, result;
3998 // result = __builtin_addc(x, y, carryin, &carryout);
3999 //
4000 // to LLVM IR of the form:
4001 //
4002 // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
4003 // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
4004 // %carry1 = extractvalue {i32, i1} %tmp1, 1
4005 // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
4006 // i32 %carryin)
4007 // %result = extractvalue {i32, i1} %tmp2, 0
4008 // %carry2 = extractvalue {i32, i1} %tmp2, 1
4009 // %tmp3 = or i1 %carry1, %carry2
4010 // %tmp4 = zext i1 %tmp3 to i32
4011 // store i32 %tmp4, i32* %carryout
4012
4013 // Scalarize our inputs.
4014 llvm::Value *X = EmitScalarExpr(E->getArg(0));
4015 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
4016 llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
4017 Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
4018
4019 // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
4020 llvm::Intrinsic::ID IntrinsicId;
4021 switch (BuiltinID) {
4022 default: llvm_unreachable("Unknown multiprecision builtin id.");
4023 case Builtin::BI__builtin_addcb:
4024 case Builtin::BI__builtin_addcs:
4025 case Builtin::BI__builtin_addc:
4026 case Builtin::BI__builtin_addcl:
4027 case Builtin::BI__builtin_addcll:
4028 IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
4029 break;
4030 case Builtin::BI__builtin_subcb:
4031 case Builtin::BI__builtin_subcs:
4032 case Builtin::BI__builtin_subc:
4033 case Builtin::BI__builtin_subcl:
4034 case Builtin::BI__builtin_subcll:
4035 IntrinsicId = llvm::Intrinsic::usub_with_overflow;
4036 break;
4037 }
4038
4039 // Construct our resulting LLVM IR expression.
4040 llvm::Value *Carry1;
4041 llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
4042 X, Y, Carry1);
4043 llvm::Value *Carry2;
4044 llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
4045 Sum1, Carryin, Carry2);
4046 llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
4047 X->getType());
4048 Builder.CreateStore(CarryOut, CarryOutPtr);
4049 return RValue::get(Sum2);
4050 }
4051
4052 case Builtin::BI__builtin_add_overflow:
4053 case Builtin::BI__builtin_sub_overflow:
4054 case Builtin::BI__builtin_mul_overflow: {
4055 const clang::Expr *LeftArg = E->getArg(0);
4056 const clang::Expr *RightArg = E->getArg(1);
4057 const clang::Expr *ResultArg = E->getArg(2);
4058
4059 clang::QualType ResultQTy =
4060 ResultArg->getType()->castAs<PointerType>()->getPointeeType();
4061
4062 WidthAndSignedness LeftInfo =
4063 getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
4064 WidthAndSignedness RightInfo =
4065 getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
4066 WidthAndSignedness ResultInfo =
4067 getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
4068
4069 // Handle mixed-sign multiplication as a special case, because adding
4070 // runtime or backend support for our generic irgen would be too expensive.
4071 if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
4072 return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
4073 RightInfo, ResultArg, ResultQTy,
4074 ResultInfo);
4075
4076 if (isSpecialUnsignedMultiplySignedResult(BuiltinID, LeftInfo, RightInfo,
4077 ResultInfo))
4078 return EmitCheckedUnsignedMultiplySignedResult(
4079 *this, LeftArg, LeftInfo, RightArg, RightInfo, ResultArg, ResultQTy,
4080 ResultInfo);
4081
4082 WidthAndSignedness EncompassingInfo =
4083 EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
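// E.g. (illustrative) for __builtin_add_overflow(int, unsigned, long *) on an
// LP64 target the encompassing type is a signed 64-bit integer: wide enough
// to represent every value of both operands and of the result type.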
4084
4085 llvm::Type *EncompassingLLVMTy =
4086 llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
4087
4088 llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
4089
4090 llvm::Intrinsic::ID IntrinsicId;
4091 switch (BuiltinID) {
4092 default:
4093 llvm_unreachable("Unknown overflow builtin id.");
4094 case Builtin::BI__builtin_add_overflow:
4095 IntrinsicId = EncompassingInfo.Signed
4096 ? llvm::Intrinsic::sadd_with_overflow
4097 : llvm::Intrinsic::uadd_with_overflow;
4098 break;
4099 case Builtin::BI__builtin_sub_overflow:
4100 IntrinsicId = EncompassingInfo.Signed
4101 ? llvm::Intrinsic::ssub_with_overflow
4102 : llvm::Intrinsic::usub_with_overflow;
4103 break;
4104 case Builtin::BI__builtin_mul_overflow:
4105 IntrinsicId = EncompassingInfo.Signed
4106 ? llvm::Intrinsic::smul_with_overflow
4107 : llvm::Intrinsic::umul_with_overflow;
4108 break;
4109 }
4110
4111 llvm::Value *Left = EmitScalarExpr(LeftArg);
4112 llvm::Value *Right = EmitScalarExpr(RightArg);
4113 Address ResultPtr = EmitPointerWithAlignment(ResultArg);
4114
4115 // Extend each operand to the encompassing type.
4116 Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
4117 Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
4118
4119 // Perform the operation on the extended values.
4120 llvm::Value *Overflow, *Result;
4121 Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
4122
4123 if (EncompassingInfo.Width > ResultInfo.Width) {
4124 // The encompassing type is wider than the result type, so we need to
4125 // truncate it.
4126 llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
4127
4128 // To see if the truncation caused an overflow, we will extend
4129 // the result and then compare it to the original result.
4130 llvm::Value *ResultTruncExt = Builder.CreateIntCast(
4131 ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
4132 llvm::Value *TruncationOverflow =
4133 Builder.CreateICmpNE(Result, ResultTruncExt);
4134
4135 Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
4136 Result = ResultTrunc;
4137 }
4138
4139 // Finally, store the result using the pointer.
4140 bool isVolatile =
4141 ResultArg->getType()->getPointeeType().isVolatileQualified();
4142 Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
4143
4144 return RValue::get(Overflow);
4145 }
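// Illustrative example (a sketch, not compiled here): for a call such as
//   long long a; unsigned b; int r;
//   bool ovf = __builtin_add_overflow(a, b, &r);
// the encompassing type is a signed integer wide enough for both operand
// types and the result type; the addition is performed at that width, and
// the truncation check above ORs in any overflow from narrowing to 'int'.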
4146
4147 case Builtin::BI__builtin_uadd_overflow:
4148 case Builtin::BI__builtin_uaddl_overflow:
4149 case Builtin::BI__builtin_uaddll_overflow:
4150 case Builtin::BI__builtin_usub_overflow:
4151 case Builtin::BI__builtin_usubl_overflow:
4152 case Builtin::BI__builtin_usubll_overflow:
4153 case Builtin::BI__builtin_umul_overflow:
4154 case Builtin::BI__builtin_umull_overflow:
4155 case Builtin::BI__builtin_umulll_overflow:
4156 case Builtin::BI__builtin_sadd_overflow:
4157 case Builtin::BI__builtin_saddl_overflow:
4158 case Builtin::BI__builtin_saddll_overflow:
4159 case Builtin::BI__builtin_ssub_overflow:
4160 case Builtin::BI__builtin_ssubl_overflow:
4161 case Builtin::BI__builtin_ssubll_overflow:
4162 case Builtin::BI__builtin_smul_overflow:
4163 case Builtin::BI__builtin_smull_overflow:
4164 case Builtin::BI__builtin_smulll_overflow: {
4165
4166 // We translate all of these builtins directly to the relevant LLVM IR intrinsic.
4167
4168 // Scalarize our inputs.
4169 llvm::Value *X = EmitScalarExpr(E->getArg(0));
4170 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
4171 Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
4172
4173 // Decide which of the overflow intrinsics we are lowering to:
4174 llvm::Intrinsic::ID IntrinsicId;
4175 switch (BuiltinID) {
4176 default: llvm_unreachable("Unknown overflow builtin id.");
4177 case Builtin::BI__builtin_uadd_overflow:
4178 case Builtin::BI__builtin_uaddl_overflow:
4179 case Builtin::BI__builtin_uaddll_overflow:
4180 IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
4181 break;
4182 case Builtin::BI__builtin_usub_overflow:
4183 case Builtin::BI__builtin_usubl_overflow:
4184 case Builtin::BI__builtin_usubll_overflow:
4185 IntrinsicId = llvm::Intrinsic::usub_with_overflow;
4186 break;
4187 case Builtin::BI__builtin_umul_overflow:
4188 case Builtin::BI__builtin_umull_overflow:
4189 case Builtin::BI__builtin_umulll_overflow:
4190 IntrinsicId = llvm::Intrinsic::umul_with_overflow;
4191 break;
4192 case Builtin::BI__builtin_sadd_overflow:
4193 case Builtin::BI__builtin_saddl_overflow:
4194 case Builtin::BI__builtin_saddll_overflow:
4195 IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
4196 break;
4197 case Builtin::BI__builtin_ssub_overflow:
4198 case Builtin::BI__builtin_ssubl_overflow:
4199 case Builtin::BI__builtin_ssubll_overflow:
4200 IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
4201 break;
4202 case Builtin::BI__builtin_smul_overflow:
4203 case Builtin::BI__builtin_smull_overflow:
4204 case Builtin::BI__builtin_smulll_overflow:
4205 IntrinsicId = llvm::Intrinsic::smul_with_overflow;
4206 break;
4207 }
4208
4210 llvm::Value *Carry;
4211 llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
4212 Builder.CreateStore(Sum, SumOutPtr);
4213
4214 return RValue::get(Carry);
4215 }
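// Illustrative example: 'bool c = __builtin_sadd_overflow(a, b, &sum);' with
// 'int' operands lowers to llvm.sadd.with.overflow.i32; element 0 of the
// returned struct is stored through 'sum' and element 1, the overflow bit,
// becomes the builtin's return value.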
4216 case Builtin::BI__builtin_addressof:
4217 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
4218 case Builtin::BI__builtin_operator_new:
4219 return EmitBuiltinNewDeleteCall(
4220 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
4221 case Builtin::BI__builtin_operator_delete:
4222 return EmitBuiltinNewDeleteCall(
4223 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
4224
4225 case Builtin::BI__builtin_is_aligned:
4226 return EmitBuiltinIsAligned(E);
4227 case Builtin::BI__builtin_align_up:
4228 return EmitBuiltinAlignTo(E, true);
4229 case Builtin::BI__builtin_align_down:
4230 return EmitBuiltinAlignTo(E, false);
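// Illustrative semantics of the alignment builtins handled above (the
// alignment argument must be a power of two):
//   __builtin_align_up(p, 16)   // round p up to a multiple of 16
//   __builtin_align_down(p, 16) // round p down to a multiple of 16
//   __builtin_is_aligned(p, 16) // true if p is 16-byte aligned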
4231
4232 case Builtin::BI__noop:
4233 // __noop always evaluates to an integer literal zero.
4234 return RValue::get(ConstantInt::get(IntTy, 0));
4235 case Builtin::BI__builtin_call_with_static_chain: {
4236 const CallExpr *Call = cast<CallExpr>(E->getArg(0));
4237 const Expr *Chain = E->getArg(1);
4238 return EmitCall(Call->getCallee()->getType(),
4239 EmitCallee(Call->getCallee()), Call, ReturnValue,
4240 EmitScalarExpr(Chain));
4241 }
4242 case Builtin::BI_InterlockedExchange8:
4243 case Builtin::BI_InterlockedExchange16:
4244 case Builtin::BI_InterlockedExchange:
4245 case Builtin::BI_InterlockedExchangePointer:
4246 return RValue::get(
4247 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
4248 case Builtin::BI_InterlockedCompareExchangePointer:
4249 case Builtin::BI_InterlockedCompareExchangePointer_nf: {
4250 llvm::Type *RTy;
4251 llvm::IntegerType *IntType =
4252 IntegerType::get(getLLVMContext(),
4253 getContext().getTypeSize(E->getType()));
4254 llvm::Type *IntPtrType = IntType->getPointerTo();
4255
4256 llvm::Value *Destination =
4257 Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType);
4258
4259 llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
4260 RTy = Exchange->getType();
4261 Exchange = Builder.CreatePtrToInt(Exchange, IntType);
4262
4263 llvm::Value *Comparand =
4264 Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);
4265
4266 auto Ordering =
4267 BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ?
4268 AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent;
4269
4270 auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
4271 Ordering, Ordering);
4272 Result->setVolatile(true);
4273
4274 return RValue::get(
4275 Builder.CreateIntToPtr(Builder.CreateExtractValue(Result, 0), RTy));
4277 }
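// Sketch of the IR emitted above (assuming 64-bit pointers for illustration):
//   %res = cmpxchg volatile i64* %dst, i64 %comparand, i64 %exchange
//          seq_cst seq_cst   ; monotonic for the '_nf' (no-fence) variant
//   %old = extractvalue { i64, i1 } %res, 0
// and %old is cast back to the original pointer type before being returned.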
4278 case Builtin::BI_InterlockedCompareExchange8:
4279 case Builtin::BI_InterlockedCompareExchange16:
4280 case Builtin::BI_InterlockedCompareExchange:
4281 case Builtin::BI_InterlockedCompareExchange64:
4282 return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
4283 case Builtin::BI_InterlockedIncrement16:
4284 case Builtin::BI_InterlockedIncrement:
4285 return RValue::get(
4286 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
4287 case Builtin::BI_InterlockedDecrement16:
4288 case Builtin::BI_InterlockedDecrement:
4289 return RValue::get(
4290 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
4291 case Builtin::BI_InterlockedAnd8:
4292 case Builtin::BI_InterlockedAnd16:
4293 case Builtin::BI_InterlockedAnd:
4294 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
4295 case Builtin::BI_InterlockedExchangeAdd8:
4296 case Builtin::BI_InterlockedExchangeAdd16:
4297 case Builtin::BI_InterlockedExchangeAdd:
4298 return RValue::get(
4299 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
4300 case Builtin::BI_InterlockedExchangeSub8:
4301 case Builtin::BI_InterlockedExchangeSub16:
4302 case Builtin::BI_InterlockedExchangeSub:
4303 return RValue::get(
4304 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
4305 case Builtin::BI_InterlockedOr8:
4306 case Builtin::BI_InterlockedOr16:
4307 case Builtin::BI_InterlockedOr:
4308 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
4309 case Builtin::BI_InterlockedXor8:
4310 case Builtin::BI_InterlockedXor16:
4311 case Builtin::BI_InterlockedXor:
4312 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
4313
4314 case Builtin::BI_bittest64:
4315 case Builtin::BI_bittest:
4316 case Builtin::BI_bittestandcomplement64:
4317 case Builtin::BI_bittestandcomplement:
4318 case Builtin::BI_bittestandreset64:
4319 case Builtin::BI_bittestandreset:
4320 case Builtin::BI_bittestandset64:
4321 case Builtin::BI_bittestandset:
4322 case Builtin::BI_interlockedbittestandreset:
4323 case Builtin::BI_interlockedbittestandreset64:
4324 case Builtin::BI_interlockedbittestandset64:
4325 case Builtin::BI_interlockedbittestandset:
4326 case Builtin::BI_interlockedbittestandset_acq:
4327 case Builtin::BI_interlockedbittestandset_rel:
4328 case Builtin::BI_interlockedbittestandset_nf:
4329 case Builtin::BI_interlockedbittestandreset_acq:
4330 case Builtin::BI_interlockedbittestandreset_rel:
4331 case Builtin::BI_interlockedbittestandreset_nf:
4332 return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
4333
4334 // These builtins exist to emit regular volatile loads and stores not
4335 // affected by the -fms-volatile setting.
4336 case Builtin::BI__iso_volatile_load8:
4337 case Builtin::BI__iso_volatile_load16:
4338 case Builtin::BI__iso_volatile_load32:
4339 case Builtin::BI__iso_volatile_load64:
4340 return RValue::get(EmitISOVolatileLoad(*this, E));
4341 case Builtin::BI__iso_volatile_store8:
4342 case Builtin::BI__iso_volatile_store16:
4343 case Builtin::BI__iso_volatile_store32:
4344 case Builtin::BI__iso_volatile_store64:
4345 return RValue::get(EmitISOVolatileStore(*this, E));
4346
4347 case Builtin::BI__exception_code:
4348 case Builtin::BI_exception_code:
4349 return RValue::get(EmitSEHExceptionCode());
4350 case Builtin::BI__exception_info:
4351 case Builtin::BI_exception_info:
4352 return RValue::get(EmitSEHExceptionInfo());
4353 case Builtin::BI__abnormal_termination:
4354 case Builtin::BI_abnormal_termination:
4355 return RValue::get(EmitSEHAbnormalTermination());
4356 case Builtin::BI_setjmpex:
4357 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
4358 E->getArg(0)->getType()->isPointerType())
4359 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
4360 break;
4361 case Builtin::BI_setjmp:
4362 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
4363 E->getArg(0)->getType()->isPointerType()) {
4364 if (getTarget().getTriple().getArch() == llvm::Triple::x86)
4365 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
4366 else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
4367 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
4368 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
4369 }
4370 break;
4371
4372 case Builtin::BI__GetExceptionInfo: {
4373 if (llvm::GlobalVariable *GV =
4374 CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
4375 return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy));
4376 break;
4377 }
4378
4379 case Builtin::BI__fastfail:
4380 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E));
4381
4382 case Builtin::BI__builtin_coro_size: {
4383 auto &Context = getContext();
4384 auto SizeTy = Context.getSizeType();
4385 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
4386 Function *F = CGM.getIntrinsic(Intrinsic::coro_size, T);
4387 return RValue::get(Builder.CreateCall(F));
4388 }
4389
4390 case Builtin::BI__builtin_coro_id:
4391 return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
4392 case Builtin::BI__builtin_coro_promise:
4393 return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
4394 case Builtin::BI__builtin_coro_resume:
4395 return EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
4396 case Builtin::BI__builtin_coro_frame:
4397 return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
4398 case Builtin::BI__builtin_coro_noop:
4399 return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
4400 case Builtin::BI__builtin_coro_free:
4401 return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
4402 case Builtin::BI__builtin_coro_destroy:
4403 return EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
4404 case Builtin::BI__builtin_coro_done:
4405 return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
4406 case Builtin::BI__builtin_coro_alloc:
4407 return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
4408 case Builtin::BI__builtin_coro_begin:
4409 return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
4410 case Builtin::BI__builtin_coro_end:
4411 return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
4412 case Builtin::BI__builtin_coro_suspend:
4413 return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
4414 case Builtin::BI__builtin_coro_param:
4415 return EmitCoroutineIntrinsic(E, Intrinsic::coro_param);
4416
4417 // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
4418 case Builtin::BIread_pipe:
4419 case Builtin::BIwrite_pipe: {
4420 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
4421 *Arg1 = EmitScalarExpr(E->getArg(1));
4422 CGOpenCLRuntime OpenCLRT(CGM);
4423 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
4424 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
4425
4426 // Type of the generic packet parameter.
4427 unsigned GenericAS =
4428 getContext().getTargetAddressSpace(LangAS::opencl_generic);
4429 llvm::Type *I8PTy = llvm::PointerType::get(
4430 llvm::Type::getInt8Ty(getLLVMContext()), GenericAS);
4431
4432 // Testing which overloaded version we should generate the call for.
4433 if (E->getNumArgs() == 2U) {
4434 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
4435 : "__write_pipe_2";
4436 // Creating a generic function type to be able to call with any builtin or
4437 // user-defined type.
4438 llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
4439 llvm::FunctionType *FTy = llvm::FunctionType::get(
4440 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4441 Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy);
4442 return RValue::get(
4443 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
4444 {Arg0, BCast, PacketSize, PacketAlign}));
4445 } else {
4446 assert(4 == E->getNumArgs() &&
4447 "Illegal number of parameters to pipe function");
4448 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
4449 : "__write_pipe_4";
4450
4451 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
4452 Int32Ty, Int32Ty};
4453 Value *Arg2 = EmitScalarExpr(E->getArg(2)),
4454 *Arg3 = EmitScalarExpr(E->getArg(3));
4455 llvm::FunctionType *FTy = llvm::FunctionType::get(
4456 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4457 Value *BCast = Builder.CreatePointerCast(Arg3, I8PTy);
4458 // We know the third argument is an integer type, but we may need to cast
4459 // it to i32.
4460 if (Arg2->getType() != Int32Ty)
4461 Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
4462 return RValue::get(
4463 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
4464 {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign}));
4465 }
4466 }
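// Illustrative mapping from OpenCL source to the emitted runtime calls:
//   read_pipe(p, &v);           -> __read_pipe_2(p, &v, size, align)
//   read_pipe(p, rid, idx, &v); -> __read_pipe_4(p, rid, idx, &v, size, align)
// where 'size' and 'align' are the pipe's element size and alignment in
// bytes, and the packet pointer is cast to a generic i8* first.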
4467 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write
4468 // functions
4469 case Builtin::BIreserve_read_pipe:
4470 case Builtin::BIreserve_write_pipe:
4471 case Builtin::BIwork_group_reserve_read_pipe:
4472 case Builtin::BIwork_group_reserve_write_pipe:
4473 case Builtin::BIsub_group_reserve_read_pipe:
4474 case Builtin::BIsub_group_reserve_write_pipe: {
4475 // Composing the mangled name for the function.
4476 const char *Name;
4477 if (BuiltinID == Builtin::BIreserve_read_pipe)
4478 Name = "__reserve_read_pipe";
4479 else if (BuiltinID == Builtin::BIreserve_write_pipe)
4480 Name = "__reserve_write_pipe";
4481 else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
4482 Name = "__work_group_reserve_read_pipe";
4483 else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
4484 Name = "__work_group_reserve_write_pipe";
4485 else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
4486 Name = "__sub_group_reserve_read_pipe";
4487 else
4488 Name = "__sub_group_reserve_write_pipe";
4489
4490 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
4491 *Arg1 = EmitScalarExpr(E->getArg(1));
4492 llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
4493 CGOpenCLRuntime OpenCLRT(CGM);
4494 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
4495 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
4496
4497 // Building the generic function prototype.
4498 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
4499 llvm::FunctionType *FTy = llvm::FunctionType::get(
4500 ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4501 // We know the second argument is an integer type, but we may need to cast
4502 // it to i32.
4503 if (Arg1->getType() != Int32Ty)
4504 Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
4505 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
4506 {Arg0, Arg1, PacketSize, PacketAlign}));
4507 }
4508 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
4509 // functions
4510 case Builtin::BIcommit_read_pipe:
4511 case Builtin::BIcommit_write_pipe:
4512 case Builtin::BIwork_group_commit_read_pipe:
4513 case Builtin::BIwork_group_commit_write_pipe:
4514 case Builtin::BIsub_group_commit_read_pipe:
4515 case Builtin::BIsub_group_commit_write_pipe: {
4516 const char *Name;
4517 if (BuiltinID == Builtin::BIcommit_read_pipe)
4518 Name = "__commit_read_pipe";
4519 else if (BuiltinID == Builtin::BIcommit_write_pipe)
4520 Name = "__commit_write_pipe";
4521 else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
4522 Name = "__work_group_commit_read_pipe";
4523 else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
4524 Name = "__work_group_commit_write_pipe";
4525 else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
4526 Name = "__sub_group_commit_read_pipe";
4527 else
4528 Name = "__sub_group_commit_write_pipe";
4529
4530 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
4531 *Arg1 = EmitScalarExpr(E->getArg(1));
4532 CGOpenCLRuntime OpenCLRT(CGM);
4533 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
4534 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
4535
4536 // Building the generic function prototype.
4537 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
4538 llvm::FunctionType *FTy =
4539 llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
4540 llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4541
4542 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
4543 {Arg0, Arg1, PacketSize, PacketAlign}));
4544 }
4545 // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
4546 case Builtin::BIget_pipe_num_packets:
4547 case Builtin::BIget_pipe_max_packets: {
4548 const char *BaseName;
4549 const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>();
4550 if (BuiltinID == Builtin::BIget_pipe_num_packets)
4551 BaseName = "__get_pipe_num_packets";
4552 else
4553 BaseName = "__get_pipe_max_packets";
4554 std::string Name = std::string(BaseName) +
4555 std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
4556
4557 // Building the generic function prototype.
4558 Value *Arg0 = EmitScalarExpr(E->getArg(0));
4559 CGOpenCLRuntime OpenCLRT(CGM);
4560 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
4561 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
4562 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
4563 llvm::FunctionType *FTy = llvm::FunctionType::get(
4564 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4565
4566 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
4567 {Arg0, PacketSize, PacketAlign}));
4568 }
4569
4570 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
4571 case Builtin::BIto_global:
4572 case Builtin::BIto_local:
4573 case Builtin::BIto_private: {
4574 auto Arg0 = EmitScalarExpr(E->getArg(0));
4575 auto NewArgT = llvm::PointerType::get(Int8Ty,
4576 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
4577 auto NewRetT = llvm::PointerType::get(Int8Ty,
4578 CGM.getContext().getTargetAddressSpace(
4579 E->getType()->getPointeeType().getAddressSpace()));
4580 auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
4581 llvm::Value *NewArg;
4582 if (Arg0->getType()->getPointerAddressSpace() !=
4583 NewArgT->getPointerAddressSpace())
4584 NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
4585 else
4586 NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
4587 auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
4588 auto NewCall =
4589 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
4590 return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
4591 ConvertType(E->getType())));
4592 }
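// For example (illustrative), the OpenCL call 'to_global(p)' on a generic
// pointer 'p' becomes a call to the runtime function '__to_global' taking a
// generic i8* and returning an i8* in the target's global address space,
// which is then cast back to the source-level result pointer type.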
4593
4594 // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
4595 // It contains four different overload formats specified in Table 6.13.17.1.
4596 case Builtin::BIenqueue_kernel: {
4597 StringRef Name; // Generated function call name
4598 unsigned NumArgs = E->getNumArgs();
4599
4600 llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
4601 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4602 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4603
4604 llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
4605 llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
4606 LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
4607 llvm::Value *Range = NDRangeL.getAddress(*this).getPointer();
4608 llvm::Type *RangeTy = NDRangeL.getAddress(*this).getType();
4609
4610 if (NumArgs == 4) {
4611 // The most basic form of the call with parameters:
4612 // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
4613 Name = "__enqueue_kernel_basic";
4614 llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, GenericVoidPtrTy,
4615 GenericVoidPtrTy};
4616 llvm::FunctionType *FTy = llvm::FunctionType::get(
4617 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4618
4619 auto Info =
4620 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
4621 llvm::Value *Kernel =
4622 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4623 llvm::Value *Block =
4624 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4625
4626 AttrBuilder B;
4627 B.addByValAttr(NDRangeL.getAddress(*this).getElementType());
4628 llvm::AttributeList ByValAttrSet =
4629 llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);
4630
4631 auto RTCall =
4632 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet),
4633 {Queue, Flags, Range, Kernel, Block});
4634 RTCall->setAttributes(ByValAttrSet);
4635 return RValue::get(RTCall);
4636 }
4637 assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
4638
4639 // Create a temporary array to hold the sizes of local pointer arguments
4640 // for the block. \p First is the position of the first size argument.
4641 auto CreateArrayForSizeVar = [=](unsigned First)
4642 -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> {
4643 llvm::APInt ArraySize(32, NumArgs - First);
4644 QualType SizeArrayTy = getContext().getConstantArrayType(
4645 getContext().getSizeType(), ArraySize, nullptr, ArrayType::Normal,
4646 /*IndexTypeQuals=*/0);
4647 auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
4648 llvm::Value *TmpPtr = Tmp.getPointer();
4649 llvm::Value *TmpSize = EmitLifetimeStart(
4650 CGM.getDataLayout().getTypeAllocSize(Tmp.getElementType()), TmpPtr);
4651 llvm::Value *ElemPtr = nullptr;
4652 // Each of the following arguments specifies the size of the corresponding
4653 // argument passed to the enqueued block.
4654 auto *Zero = llvm::ConstantInt::get(IntTy, 0);
4655 for (unsigned I = First; I < NumArgs; ++I) {
4656 auto *Index = llvm::ConstantInt::get(IntTy, I - First);
4657 auto *GEP = Builder.CreateGEP(TmpPtr, {Zero, Index});
4658 if (I == First)
4659 ElemPtr = GEP;
4660 auto *V =
4661 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
4662 Builder.CreateAlignedStore(
4663 V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
4664 }
4665 return std::tie(ElemPtr, TmpSize, TmpPtr);
4666 };
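// For instance (illustrative), 'enqueue_kernel(q, flags, ndr, block, 8u, 16u)'
// reaches here with First == 4: the two sizes {8, 16} are zero-extended to
// size_t, stored into the temporary array, and the element pointer plus the
// lifetime bookkeeping values are returned for the caller to clean up.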
4667
4668 // Could have events and/or varargs.
4669 if (E->getArg(3)->getType()->isBlockPointerType()) {
4670 // No events passed, but has variadic arguments.
4671 Name = "__enqueue_kernel_varargs";
4672 auto Info =
4673 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
4674 llvm::Value *Kernel =
4675 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4676 auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4677 llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
4678 std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(4);
4679
4680 // Create an array of the arguments, as well as a constant value to
4681 // express to the runtime the number of variadic arguments.
4682 llvm::Value *const Args[] = {Queue, Flags,
4683 Range, Kernel,
4684 Block, ConstantInt::get(IntTy, NumArgs - 4),
4685 ElemPtr};
4686 llvm::Type *const ArgTys[] = {
4687 QueueTy, IntTy, RangeTy, GenericVoidPtrTy,
4688 GenericVoidPtrTy, IntTy, ElemPtr->getType()};
4689
4690 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
4691 auto Call = RValue::get(
4692 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
4693 if (TmpSize)
4694 EmitLifetimeEnd(TmpSize, TmpPtr);
4695 return Call;
4696 }
4697 // From this point on, the calls being emitted take event arguments.
4698 if (NumArgs >= 7) {
4699 llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy);
4700 llvm::PointerType *EventPtrTy = EventTy->getPointerTo(
4701 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
4702
4703 llvm::Value *NumEvents =
4704 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
4705
4706 // Since SemaOpenCLBuiltinEnqueueKernel allows the fifth and sixth
4707 // arguments to be null pointer constants (including a `0` literal), we
4708 // take that into account and emit a null pointer directly.
4709 llvm::Value *EventWaitList = nullptr;
4710 if (E->getArg(4)->isNullPointerConstant(
4711 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
4712 EventWaitList = llvm::ConstantPointerNull::get(EventPtrTy);
4713 } else {
4714 EventWaitList = E->getArg(4)->getType()->isArrayType()
4715 ? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
4716 : EmitScalarExpr(E->getArg(4));
4717 // Convert to generic address space.
4718 EventWaitList = Builder.CreatePointerCast(EventWaitList, EventPtrTy);
4719 }
4720 llvm::Value *EventRet = nullptr;
4721 if (E->getArg(5)->isNullPointerConstant(
4722 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
4723 EventRet = llvm::ConstantPointerNull::get(EventPtrTy);
4724 } else {
4725 EventRet =
4726 Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), EventPtrTy);
4727 }
4728
4729 auto Info =
4730 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
4731 llvm::Value *Kernel =
4732 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4733 llvm::Value *Block =
4734 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4735
4736 std::vector<llvm::Type *> ArgTys = {
4737 QueueTy, Int32Ty, RangeTy, Int32Ty,
4738 EventPtrTy, EventPtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
4739
4740 std::vector<llvm::Value *> Args = {Queue, Flags, Range,
4741 NumEvents, EventWaitList, EventRet,
4742 Kernel, Block};
4743
4744 if (NumArgs == 7) {
4745 // Has events but no variadics.
4746 Name = "__enqueue_kernel_basic_events";
4747 llvm::FunctionType *FTy = llvm::FunctionType::get(
4748 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4749 return RValue::get(
4750 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
4751 llvm::ArrayRef<llvm::Value *>(Args)));
4752 }
4753 // Has event info and variadics.
4754 // Pass the number of variadics to the runtime function too.
4755 Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
4756 ArgTys.push_back(Int32Ty);
4757 Name = "__enqueue_kernel_events_varargs";
4758
4759 llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
4760 std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(7);
4761 Args.push_back(ElemPtr);
4762 ArgTys.push_back(ElemPtr->getType());
4763
4764 llvm::FunctionType *FTy = llvm::FunctionType::get(
4765 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4766 auto Call =
4767 RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
4768 llvm::ArrayRef<llvm::Value *>(Args)));
4769 if (TmpSize)
4770 EmitLifetimeEnd(TmpSize, TmpPtr);
4771 return Call;
4772 }
4773 LLVM_FALLTHROUGH;
4774 }
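// Summary of the enqueue_kernel lowering above (illustrative):
//   4 args, no events             -> __enqueue_kernel_basic
//   no events, local-size args    -> __enqueue_kernel_varargs
//   7 args, with events           -> __enqueue_kernel_basic_events
//   events plus local-size args   -> __enqueue_kernel_events_varargs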
4775 // OpenCL v2.0 s6.13.17.6 - Kernel query functions need a bitcast of the
4776 // block parameter.
4777 case Builtin::BIget_kernel_work_group_size: {
4778 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4779 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4780 auto Info =
4781 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
4782 Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4783 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4784 return RValue::get(EmitRuntimeCall(
4785 CGM.CreateRuntimeFunction(
4786 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
4787 false),
4788 "__get_kernel_work_group_size_impl"),
4789 {Kernel, Arg}));
4790 }
4791 case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
4792 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4793 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4794 auto Info =
4795 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
4796 Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4797 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4798 return RValue::get(EmitRuntimeCall(
4799 CGM.CreateRuntimeFunction(
4800 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
4801 false),
4802 "__get_kernel_preferred_work_group_size_multiple_impl"),
4803 {Kernel, Arg}));
4804 }
4805 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
4806 case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
4807 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4808 getContext().getTargetAddressSpace(LangAS::opencl_generic));
4809 LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
4810 llvm::Value *NDRange = NDRangeL.getAddress(*this).getPointer();
4811 auto Info =
4812 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
4813 Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4814 Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4815 const char *Name =
4816 BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
4817 ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
4818 : "__get_kernel_sub_group_count_for_ndrange_impl";
4819 return RValue::get(EmitRuntimeCall(
4820 CGM.CreateRuntimeFunction(
4821 llvm::FunctionType::get(
4822 IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
4823 false),
4824 Name),
4825 {NDRange, Kernel, Block}));
4826 }
4827
4828 case Builtin::BI__builtin_store_half:
4829 case Builtin::BI__builtin_store_halff: {
4830 Value *Val = EmitScalarExpr(E->getArg(0));
4831 Address Address = EmitPointerWithAlignment(E->getArg(1));
4832 Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
4833 return RValue::get(Builder.CreateStore(HalfVal, Address));
4834 }
4835 case Builtin::BI__builtin_load_half: {
4836 Address Address = EmitPointerWithAlignment(E->getArg(0));
4837 Value *HalfVal = Builder.CreateLoad(Address);
4838 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
4839 }
4840 case Builtin::BI__builtin_load_halff: {
4841 Address Address = EmitPointerWithAlignment(E->getArg(0));
4842 Value *HalfVal = Builder.CreateLoad(Address);
4843 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
4844 }
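// Illustrative semantics of the half-precision builtins handled above:
//   __builtin_store_halff(f, p);       // fptrunc float->half, store to *p
//   float g = __builtin_load_halff(p); // load half, fpext half->float
//   double d = __builtin_load_half(p); // load half, fpext half->double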
4845 case Builtin::BIprintf:
4846 if (getTarget().getTriple().isNVPTX())
4847 return EmitNVPTXDevicePrintfCallExpr(E, ReturnValue);
4848 if (getTarget().getTriple().getArch() == Triple::amdgcn &&
4849 getLangOpts().HIP)
4850 return EmitAMDGPUDevicePrintfCallExpr(E, ReturnValue);
4851 break;
4852 case Builtin::BI__builtin_canonicalize:
4853 case Builtin::BI__builtin_canonicalizef:
4854 case Builtin::BI__builtin_canonicalizef16:
4855 case Builtin::BI__builtin_canonicalizel:
4856 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::canonicalize));
4857
4858 case Builtin::BI__builtin_thread_pointer: {
4859 if (!getContext().getTargetInfo().isTLSSupported())
4860 CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
4861 // Fall through - it's already mapped to the intrinsic by GCCBuiltin.
4862 break;
4863 }
4864 case Builtin::BI__builtin_os_log_format:
4865 return emitBuiltinOSLogFormat(*E);
4866
4867 case Builtin::BI__xray_customevent: {
4868 if (!ShouldXRayInstrumentFunction())
4869 return RValue::getIgnored();
4870
4871 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
4872 XRayInstrKind::Custom))
4873 return RValue::getIgnored();
4874
4875 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
4876 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
4877 return RValue::getIgnored();
4878
4879 Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
4880 auto FTy = F->getFunctionType();
4881 auto Arg0 = E->getArg(0);
4882 auto Arg0Val = EmitScalarExpr(Arg0);
4883 auto Arg0Ty = Arg0->getType();
4884 auto PTy0 = FTy->getParamType(0);
4885 if (PTy0 != Arg0Val->getType()) {
4886 if (Arg0Ty->isArrayType())
4887 Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer();
4888 else
4889 Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
4890 }
4891 auto Arg1 = EmitScalarExpr(E->getArg(1));
4892 auto PTy1 = FTy->getParamType(1);
4893 if (PTy1 != Arg1->getType())
4894 Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
4895 return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
4896 }
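// Illustrative use (assuming custom-event instrumentation is enabled):
//   __xray_customevent(buf, len);
// emits a call to llvm.xray.customevent(i8* %buf, i32 %len), which the XRay
// runtime turns into a payload record in the trace.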
4897
4898 case Builtin::BI__xray_typedevent: {
4899 // TODO: There should be a way to always emit events even if the current
4900 // function is not instrumented. Losing events in a stream can cripple
4901 // a trace.
4902 if (!ShouldXRayInstrumentFunction())
4903 return RValue::getIgnored();
4904
4905 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
4906 XRayInstrKind::Typed))
4907 return RValue::getIgnored();
4908
4909 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
4910 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
4911 return RValue::getIgnored();
4912
4913 Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
4914 auto FTy = F->getFunctionType();
4915 auto Arg0 = EmitScalarExpr(E->getArg(0));
4916 auto PTy0 = FTy->getParamType(0);
4917 if (PTy0 != Arg0->getType())
4918 Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
4919 auto Arg1 = E->getArg(1);
4920 auto Arg1Val = EmitScalarExpr(Arg1);
4921 auto Arg1Ty = Arg1->getType();
4922 auto PTy1 = FTy->getParamType(1);
4923 if (PTy1 != Arg1Val->getType()) {
4924 if (Arg1Ty->isArrayType())
4925 Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer();
4926 else
4927 Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
4928 }
4929 auto Arg2 = EmitScalarExpr(E->getArg(2));
4930 auto PTy2 = FTy->getParamType(2);
4931 if (PTy2 != Arg2->getType())
4932 Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
4933 return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
4934 }
4935
4936 case Builtin::BI__builtin_ms_va_start:
4937 case Builtin::BI__builtin_ms_va_end:
4938 return RValue::get(
4939 EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(),
4940 BuiltinID == Builtin::BI__builtin_ms_va_start));
4941
4942 case Builtin::BI__builtin_ms_va_copy: {
4943 // Lower this manually. We can't reliably determine whether or not any
4944 // given va_copy() is for a Win64 va_list from the calling convention
4945 // alone, because it's legal to do this from a System V ABI function.
4946 // With opaque pointer types, we won't have enough information in LLVM
4947 // IR to determine this from the argument types, either. Best to do it
4948 // now, while we have enough information.
4949 Address DestAddr = EmitMSVAListRef(E->getArg(0));
4950 Address SrcAddr = EmitMSVAListRef(E->getArg(1));
4951
4952 llvm::Type *BPP = Int8PtrPtrTy;
4953
4954 DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"),
4955 DestAddr.getAlignment());
4956 SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"),
4957 SrcAddr.getAlignment());
4958
4959 Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
4960 return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
4961 }
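// Illustrative effect: a Win64 va_list is a single char*, so the copy above
// is one pointer-sized load and store, roughly *(char **)dst = *(char **)src.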
4962 }
4963
4964 // If this is an alias for a lib function (e.g. __builtin_sin), emit
4965 // the call using the normal call path, but using the unmangled
4966 // version of the function name.
4967 if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
4968 return emitLibraryCall(*this, FD, E,
4969 CGM.getBuiltinLibFunction(FD, BuiltinID));
4970
4971 // If this is a predefined lib function (e.g. malloc), emit the call
4972 // using exactly the normal call path.
4973 if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
4974 return emitLibraryCall(*this, FD, E,
4975 cast<llvm::Constant>(EmitScalarExpr(E->getCallee())));
4976
4977 // Check that a call to a target specific builtin has the correct target
4978 // features.
4979 // This check is placed down here so that non-target-specific builtins skip
4980 // it; however, if generic builtins ever start to require generic target
4981 // features, it can move up to the beginning of the function.
4982 checkTargetFeatures(E, FD);
4983
4984 if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
4985 LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
4986
4987 // See if we have a target specific intrinsic.
4988 const char *Name = getContext().BuiltinInfo.getName(BuiltinID);
4989 Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
4990 StringRef Prefix =
4991 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
4992 if (!Prefix.empty()) {
4993 IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix.data(), Name);
4994 // NOTE: we don't need to perform a compatibility flag check here since the
4995 // intrinsics are declared in Builtins*.def via LANGBUILTIN, which filters
4996 // the MS builtins via ALL_MS_LANGUAGES, so they are filtered out earlier.
4997 if (IntrinsicID == Intrinsic::not_intrinsic)
4998 IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
4999 }
5000
5001 if (IntrinsicID != Intrinsic::not_intrinsic) {
5002 SmallVector<Value*, 16> Args;
5003
5004 // Find out if any arguments are required to be integer constant
5005 // expressions.
5006 unsigned ICEArguments = 0;
5007 ASTContext::GetBuiltinTypeError Error;
5008 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
5009 assert(Error == ASTContext::GE_None && "Should not codegen an error");
5010
5011 Function *F = CGM.getIntrinsic(IntrinsicID);
5012 llvm::FunctionType *FTy = F->getFunctionType();
5013
5014 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
5015 Value *ArgValue;
5016 // If this is a normal argument, just emit it as a scalar.
5017 if ((ICEArguments & (1 << i)) == 0) {
5018 ArgValue = EmitScalarExpr(E->getArg(i));
5019 } else {
5020 // If this is required to be a constant, constant fold it so that we
5021 // know that the generated intrinsic gets a ConstantInt.
5022 ArgValue = llvm::ConstantInt::get(
5023 getLLVMContext(),
5024 *E->getArg(i)->getIntegerConstantExpr(getContext()));
5025 }
5026
5027 // If the intrinsic arg type is different from the builtin arg type
5028 // we need to do a bit cast.
5029 llvm::Type *PTy = FTy->getParamType(i);
5030 if (PTy != ArgValue->getType()) {
5031 // XXX - vector of pointers?
5032 if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
5033 if (PtrTy->getAddressSpace() !=
5034 ArgValue->getType()->getPointerAddressSpace()) {
5035 ArgValue = Builder.CreateAddrSpaceCast(
5036 ArgValue,
5037 ArgValue->getType()->getPointerTo(PtrTy->getAddressSpace()));
5038 }
5039 }
5040
5041 assert(ArgValue->getType()->canLosslesslyBitCastTo(PTy) &&
5042 "Must be able to losslessly bit cast to param");
5043 ArgValue = Builder.CreateBitCast(ArgValue, PTy);
5044 }
5045
5046 Args.push_back(ArgValue);
5047 }
5048
5049 Value *V = Builder.CreateCall(F, Args);
5050 QualType BuiltinRetType = E->getType();
5051
5052 llvm::Type *RetTy = VoidTy;
5053 if (!BuiltinRetType->isVoidType())
5054 RetTy = ConvertType(BuiltinRetType);
5055
5056 if (RetTy != V->getType()) {
5057 // XXX - vector of pointers?
5058 if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
5059 if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
5060 V = Builder.CreateAddrSpaceCast(
5061 V, V->getType()->getPointerTo(PtrTy->getAddressSpace()));
5062 }
5063 }
5064
5065 assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
5066 "Must be able to losslessly bit cast result type");
5067 V = Builder.CreateBitCast(V, RetTy);
5068 }
5069
5070 return RValue::get(V);
5071 }
5072
5073 // Some target-specific builtins can have aggregate return values, e.g.
5074 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
5075 // ReturnValue to be non-null, so that the target-specific emission code can
5076 // always just emit into it.
5077 TypeEvaluationKind EvalKind = getEvaluationKind(E->getType());
5078 if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) {
5079 Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp");
5080 ReturnValue = ReturnValueSlot(DestPtr, false);
5081 }
5082
5083 // Now see if we can emit a target-specific builtin.
5084 if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) {
5085 switch (EvalKind) {
5086 case TEK_Scalar:
5087 return RValue::get(V);
5088 case TEK_Aggregate:
5089 return RValue::getAggregate(ReturnValue.getValue(),
5090 ReturnValue.isVolatile());
5091 case TEK_Complex:
5092 llvm_unreachable("No current target builtin returns complex");
5093 }
5094 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
5095 }
5096
5097 ErrorUnsupported(E, "builtin function");
5098
5099 // Unknown builtin, for now just dump it out and return undef.
5100 return GetUndefRValue(E->getType());
5101}
5102
5103static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
5104 unsigned BuiltinID, const CallExpr *E,
5105 ReturnValueSlot ReturnValue,
5106 llvm::Triple::ArchType Arch) {
5107 switch (Arch) {
5108 case llvm::Triple::arm:
5109 case llvm::Triple::armeb:
5110 case llvm::Triple::thumb:
5111 case llvm::Triple::thumbeb:
5112 return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch);
5113 case llvm::Triple::aarch64:
5114 case llvm::Triple::aarch64_32:
5115 case llvm::Triple::aarch64_be:
5116 return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
5117 case llvm::Triple::bpfeb:
5118 case llvm::Triple::bpfel:
5119 return CGF->EmitBPFBuiltinExpr(BuiltinID, E);
5120 case llvm::Triple::x86:
5121 case llvm::Triple::x86_64:
5122 return CGF->EmitX86BuiltinExpr(BuiltinID, E);
5123 case llvm::Triple::ppc:
5124 case llvm::Triple::ppcle:
5125 case llvm::Triple::ppc64:
5126 case llvm::Triple::ppc64le:
5127 return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
5128 case llvm::Triple::r600:
5129 case llvm::Triple::amdgcn:
5130 return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
5131 case llvm::Triple::systemz:
5132 return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
5133 case llvm::Triple::nvptx:
5134 case llvm::Triple::nvptx64:
5135 return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
5136 case llvm::Triple::wasm32:
5137 case llvm::Triple::wasm64:
5138 return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
5139 case llvm::Triple::hexagon:
5140 return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
5141 default:
5142 return nullptr;
5143 }
5144}
5145
5146Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
5147 const CallExpr *E,
5148 ReturnValueSlot ReturnValue) {
5149 if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
5150 assert(getContext().getAuxTargetInfo() && "Missing aux target info");
5151 return EmitTargetArchBuiltinExpr(
5152 this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
5153 ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
5154 }
5155
5156 return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue,
5157 getTarget().getTriple().getArch());
5158}
5159
5160static llvm::FixedVectorType *GetNeonType(CodeGenFunction *CGF,
5161 NeonTypeFlags TypeFlags,
5162 bool HasLegalHalfType = true,
5163 bool V1Ty = false,
5164 bool AllowBFloatArgsAndRet = true) {
5165 int IsQuad = TypeFlags.isQuad();
5166 switch (TypeFlags.getEltType()) {
5167 case NeonTypeFlags::Int8:
5168 case NeonTypeFlags::Poly8:
5169 return llvm::FixedVectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
5170 case NeonTypeFlags::Int16:
5171 case NeonTypeFlags::Poly16:
5172 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
5173 case NeonTypeFlags::BFloat16:
5174 if (AllowBFloatArgsAndRet)
5175 return llvm::FixedVectorType::get(CGF->BFloatTy, V1Ty ? 1 : (4 << IsQuad));
5176 else
5177 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
5178 case NeonTypeFlags::Float16:
5179 if (HasLegalHalfType)
5180 return llvm::FixedVectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
5181 else
5182 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
5183 case NeonTypeFlags::Int32:
5184 return llvm::FixedVectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
5185 case NeonTypeFlags::Int64:
5186 case NeonTypeFlags::Poly64:
5187 return llvm::FixedVectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
5188 case NeonTypeFlags::Poly128:
5189 // FIXME: i128 and f128 aren't fully supported in Clang and LLVM;
5190 // a lot of the i128 and f128 API is missing.
5191 // So we use v16i8 to represent poly128 and get it pattern matched.
5192 return llvm::FixedVectorType::get(CGF->Int8Ty, 16);
5193 case NeonTypeFlags::Float32:
5194 return llvm::FixedVectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
5195 case NeonTypeFlags::Float64:
5196 return llvm::FixedVectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
5197 }
5198 llvm_unreachable("Unknown vector element type!");
5199}
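// For example, an Int32 element type yields <2 x i32> when the quad bit is
// clear (a 64-bit D-register) and <4 x i32> when it is set (a 128-bit
// Q-register), matching the '2 << IsQuad' computation above.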
5200
5201static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF,
5202 NeonTypeFlags IntTypeFlags) {
5203 int IsQuad = IntTypeFlags.isQuad();
5204 switch (IntTypeFlags.getEltType()) {
5205 case NeonTypeFlags::Int16:
5206 return llvm::FixedVectorType::get(CGF->HalfTy, (4 << IsQuad));
5207 case NeonTypeFlags::Int32:
5208 return llvm::FixedVectorType::get(CGF->FloatTy, (2 << IsQuad));
5209 case NeonTypeFlags::Int64:
5210 return llvm::FixedVectorType::get(CGF->DoubleTy, (1 << IsQuad));
5211 default:
5212 llvm_unreachable("Type can't be converted to floating-point!");
5213 }
5214}
5215
5216Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C,
5217 const ElementCount &Count) {
5218 Value *SV = llvm::ConstantVector::getSplat(Count, C);
5219 return Builder.CreateShuffleVector(V, V, SV, "lane");
5220}
5221
5222Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
5223 ElementCount EC = cast<llvm::VectorType>(V->getType())->getElementCount();
5224 return EmitNeonSplat(V, C, EC);
5225}
5226
5227Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
5228 const char *name,
5229 unsigned shift, bool rightshift) {
5230 unsigned j = 0;
5231 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
5232 ai != ae; ++ai, ++j) {
5233 if (F->isConstrainedFPIntrinsic())
5234 if (ai->getType()->isMetadataTy())
5235 continue;
5236 if (shift > 0 && shift == j)
5237 Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
5238 else
5239 Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
5240 }
5241
5242 if (F->isConstrainedFPIntrinsic())
5243 return Builder.CreateConstrainedFPCall(F, Ops, name);
5244 else
5245 return Builder.CreateCall(F, Ops, name);
5246}
5247
5248Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
5249 bool neg) {
5250 int SV = cast<ConstantInt>(V)->getSExtValue();
5251 return ConstantInt::get(Ty, neg ? -SV : SV);
5252}
5253
5254// Right-shift a vector by a constant.
5255Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
5256 llvm::Type *Ty, bool usgn,
5257 const char *name) {
5258 llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
5259
5260 int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
5261 int EltSize = VTy->getScalarSizeInBits();
5262
5263 Vec = Builder.CreateBitCast(Vec, Ty);
5264
5265 // lshr/ashr are undefined when the shift amount is equal to the vector
5266 // element size.
5267 if (ShiftAmt == EltSize) {
5268 if (usgn) {
5269 // Right-shifting an unsigned value by its size yields 0.
5270 return llvm::ConstantAggregateZero::get(VTy);
5271 } else {
5272 // Right-shifting a signed value by its size is equivalent
5273 // to a shift of size-1.
5274 --ShiftAmt;
5275 Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
5276 }
5277 }
5278
5279 Shift = EmitNeonShiftVector(Shift, Ty, false);
5280 if (usgn)
5281 return Builder.CreateLShr(Vec, Shift, name);
5282 else
5283 return Builder.CreateAShr(Vec, Shift, name);
5284}
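// For example, a signed right shift by 8 of a <8 x i8> vector is emitted as
// an ashr by 7: ashr by the full element width is undefined in LLVM IR, but
// for signed values a shift by size-1 yields the same all-sign-bits result.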
5285
5286enum {
5287 AddRetType = (1 << 0),
5288 Add1ArgType = (1 << 1),
5289 Add2ArgTypes = (1 << 2),
5290
5291 VectorizeRetType = (1 << 3),
5292 VectorizeArgTypes = (1 << 4),
5293
5294 InventFloatType = (1 << 5),
5295 UnsignedAlts = (1 << 6),
5296
5297 Use64BitVectors = (1 << 7),
5298 Use128BitVectors = (1 << 8),
5299
5300 Vectorize1ArgType = Add1ArgType | VectorizeArgTypes,
5301 VectorRet = AddRetType | VectorizeRetType,
5302 VectorRetGetArgs01 =
5303 AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes,
5304 FpCmpzModifiers =
5305 AddRetType | VectorizeRetType | Add1ArgType | InventFloatType
5306};
5307
5308namespace {
5309struct ARMVectorIntrinsicInfo {
5310 const char *NameHint;
5311 unsigned BuiltinID;
5312 unsigned LLVMIntrinsic;
5313 unsigned AltLLVMIntrinsic;
5314 uint64_t TypeModifier;
5315
5316 bool operator<(unsigned RHSBuiltinID) const {
5317 return BuiltinID < RHSBuiltinID;
5318 }
5319 bool operator<(const ARMVectorIntrinsicInfo &TE) const {
5320 return BuiltinID < TE.BuiltinID;
5321 }
5322};
5323} // end anonymous namespace
5324
5325#define NEONMAP0(NameBase) \
5326 { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 }
5327
5328 #define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
5329 { #NameBase, NEON::BI__builtin_neon_ ## NameBase, \
5330 Intrinsic::LLVMIntrinsic, 0, TypeModifier }
5331
5332 #define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
5333 { #NameBase, NEON::BI__builtin_neon_ ## NameBase, \
5334 Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
5335 TypeModifier }
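// For example, NEONMAP1(vabs_v, arm_neon_vabs, 0) expands to the table entry
//   { "vabs_v", NEON::BI__builtin_neon_vabs_v, Intrinsic::arm_neon_vabs, 0, 0 }
// i.e. no alternate intrinsic and no type-modifier flags.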
5336
5337 static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap[] = {
5338 NEONMAP1(__a32_vcvt_bf16_v, arm_neon_vcvtfp2bf, 0),
5339 NEONMAP0(splat_lane_v),
5340 NEONMAP0(splat_laneq_v),
5341 NEONMAP0(splatq_lane_v),
5342 NEONMAP0(splatq_laneq_v),
5343 NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
5344 NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
5345 NEONMAP1(vabs_v, arm_neon_vabs, 0),
5346 NEONMAP1(vabsq_v, arm_neon_vabs, 0),
5347 NEONMAP0(vaddhn_v),
5348 NEONMAP1(vaesdq_v, arm_neon_aesd, 0),
5349 NEONMAP1(vaeseq_v, arm_neon_aese, 0),
5350 NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
5351 NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0),
5352 NEONMAP1(vbfdot_v, arm_neon_bfdot, 0),
5353 NEONMAP1(vbfdotq_v, arm_neon_bfdot, 0),
5354 NEONMAP1(vbfmlalbq_v, arm_neon_bfmlalb, 0),
5355 NEONMAP1(vbfmlaltq_v, arm_neon_bfmlalt, 0),
5356 NEONMAP1(vbfmmlaq_v, arm_neon_bfmmla, 0),
5357 NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
5358 NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
5359 NEONMAP1(vcadd_rot270_v, arm_neon_vcadd_rot270, Add1ArgType),
5360 NEONMAP1(vcadd_rot90_v, arm_neon_vcadd_rot90, Add1ArgType),
5361 NEONMAP1(vcaddq_rot270_v, arm_neon_vcadd_rot270, Add1ArgType),
5362 NEONMAP1(vcaddq_rot90_v, arm_neon_vcadd_rot90, Add1ArgType),
5363 NEONMAP1(vcage_v, arm_neon_vacge, 0),
5364 NEONMAP1(vcageq_v, arm_neon_vacge, 0),
5365 NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
5366 NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
5367 NEONMAP1(vcale_v, arm_neon_vacge, 0),
5368 NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
5369 NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
5370 NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
5371 NEONMAP0(vceqz_v),
5372 NEONMAP0(vceqzq_v),
5373 NEONMAP0(vcgez_v),
5374 NEONMAP0(vcgezq_v),
5375 NEONMAP0(vcgtz_v),
5376 NEONMAP0(vcgtzq_v),
5377 NEONMAP0(vclez_v),
5378 NEONMAP0(vclezq_v),
5379 NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
5380 NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
5381 NEONMAP0(vcltz_v),
5382 NEONMAP0(vcltzq_v),
5383 NEONMAP1(vclz_v, ctlz, Add1ArgType),
5384 NEONMAP1(vclzq_v, ctlz, Add1ArgType),
5385 NEONMAP1(vcnt_v, ctpop, Add1ArgType),
5386 NEONMAP1(vcntq_v, ctpop, Add1ArgType),
5387 NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
5388 NEONMAP0(vcvt_f16_v),
5389 NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
5390 NEONMAP0(vcvt_f32_v),
5391 NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
5392 NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
5393 NEONMAP1(vcvt_n_s16_v, arm_neon_vcvtfp2fxs, 0),
5394 NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
5395 NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
5396 NEONMAP1(vcvt_n_u16_v, arm_neon_vcvtfp2fxu, 0),
5397 NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
5398 NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
5399 NEONMAP0(vcvt_s16_v),
5400 NEONMAP0(vcvt_s32_v),
5401 NEONMAP0(vcvt_s64_v),
5402 NEONMAP0(vcvt_u16_v),
5403 NEONMAP0(vcvt_u32_v),
5404 NEONMAP0(vcvt_u64_v),
5405 NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0),
5406 NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
5407 NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
5408 NEONMAP1(vcvta_u16_v, arm_neon_vcvtau, 0),
5409 NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
5410 NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
5411 NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0),
5412 NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
5413 NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
5414 NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0),
5415 NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
5416 NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
5417 NEONMAP1(vcvth_bf16_f32, arm_neon_vcvtbfp2bf, 0),
  NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtm_u16_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtmq_s16_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtmq_u16_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtn_s16_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtn_u16_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtnq_s16_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtnq_u16_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtp_s16_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtp_u16_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtpq_s16_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
  NEONMAP0(vcvtq_f16_v),
  NEONMAP0(vcvtq_f32_v),
  NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
  NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvtq_n_s16_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_u16_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP0(vcvtq_s16_v),
  NEONMAP0(vcvtq_s32_v),
  NEONMAP0(vcvtq_s64_v),
  NEONMAP0(vcvtq_u16_v),
  NEONMAP0(vcvtq_u32_v),
  NEONMAP0(vcvtq_u64_v),
  NEONMAP2(vdot_v, arm_neon_udot, arm_neon_sdot, 0),
  NEONMAP2(vdotq_v, arm_neon_udot, arm_neon_sdot, 0),
  NEONMAP0(vext_v),
  NEONMAP0(vextq_v),
  NEONMAP0(vfma_v),
  NEONMAP0(vfmaq_v),
  NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
  NEONMAP0(vld1_dup_v),
  NEONMAP1(vld1_v, arm_neon_vld1, 0),
  NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0),
  NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0),
  NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0),
  NEONMAP0(vld1q_dup_v),
  NEONMAP1(vld1q_v, arm_neon_vld1, 0),
  NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0),
  NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0),
  NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0),
  NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0),
  NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
  NEONMAP1(vld2_v, arm_neon_vld2, 0),
  NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0),
  NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
  NEONMAP1(vld2q_v, arm_neon_vld2, 0),
  NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0),
  NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
  NEONMAP1(vld3_v, arm_neon_vld3, 0),
  NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0),
  NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
  NEONMAP1(vld3q_v, arm_neon_vld3, 0),
  NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0),
  NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
  NEONMAP1(vld4_v, arm_neon_vld4, 0),
  NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0),
  NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
  NEONMAP1(vld4q_v, arm_neon_vld4, 0),
  NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
  NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
  NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
  NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
  NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
  NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
  NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
  NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
  NEONMAP2(vmmlaq_v, arm_neon_ummla, arm_neon_smmla, 0),
  NEONMAP0(vmovl_v),
  NEONMAP0(vmovn_v),
  NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
  NEONMAP0(vmull_v),
  NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
  NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
  NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
  NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
  NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
  NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
  NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
  NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts),
  NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
  NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
  NEONMAP2(vqadd_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqaddq_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqdmlal_v, arm_neon_vqdmull, sadd_sat, 0),
  NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, ssub_sat, 0),
  NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
  NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
  NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
  NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
  NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
  NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
  NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
  NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
  NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
  NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
  NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
  NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
  NEONMAP2(vqsub_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqsubq_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
  NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
  NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
  NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
  NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
  NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
  NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
  NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
  NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
  NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
  NEONMAP0(vrndi_v),
  NEONMAP0(vrndiq_v),
  NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
  NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
  NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
  NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
  NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
  NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
  NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
  NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
  NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
  NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
  NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
  NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
  NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
  NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
  NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
  NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
  NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0),
  NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0),
  NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0),
  NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0),
  NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0),
  NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0),
  NEONMAP0(vshl_n_v),
  NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
  NEONMAP0(vshll_n_v),
  NEONMAP0(vshlq_n_v),
  NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
  NEONMAP0(vshr_n_v),
  NEONMAP0(vshrn_n_v),
  NEONMAP0(vshrq_n_v),
  NEONMAP1(vst1_v, arm_neon_vst1, 0),
  NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0),
  NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0),
  NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0),
  NEONMAP1(vst1q_v, arm_neon_vst1, 0),
  NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0),
  NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0),
  NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0),
  NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
  NEONMAP1(vst2_v, arm_neon_vst2, 0),
  NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
  NEONMAP1(vst2q_v, arm_neon_vst2, 0),
  NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
  NEONMAP1(vst3_v, arm_neon_vst3, 0),
  NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
  NEONMAP1(vst3q_v, arm_neon_vst3, 0),
  NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
  NEONMAP1(vst4_v, arm_neon_vst4, 0),
  NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
  NEONMAP1(vst4q_v, arm_neon_vst4, 0),
  NEONMAP0(vsubhn_v),
  NEONMAP0(vtrn_v),
  NEONMAP0(vtrnq_v),
  NEONMAP0(vtst_v),
  NEONMAP0(vtstq_v),
  NEONMAP1(vusdot_v, arm_neon_usdot, 0),
  NEONMAP1(vusdotq_v, arm_neon_usdot, 0),
  NEONMAP1(vusmmlaq_v, arm_neon_usmmla, 0),
  NEONMAP0(vuzp_v),
  NEONMAP0(vuzpq_v),
  NEONMAP0(vzip_v),
  NEONMAP0(vzipq_v)
};

static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
  NEONMAP1(__a64_vcvtq_low_bf16_v, aarch64_neon_bfcvtn, 0),
  NEONMAP0(splat_lane_v),
  NEONMAP0(splat_laneq_v),
  NEONMAP0(splatq_lane_v),
  NEONMAP0(splatq_laneq_v),
  NEONMAP1(vabs_v, aarch64_neon_abs, 0),
  NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
  NEONMAP0(vaddhn_v),
  NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0),
  NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
  NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
  NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
  NEONMAP1(vbfdot_v, aarch64_neon_bfdot, 0),
  NEONMAP1(vbfdotq_v, aarch64_neon_bfdot, 0),
  NEONMAP1(vbfmlalbq_v, aarch64_neon_bfmlalb, 0),
  NEONMAP1(vbfmlaltq_v, aarch64_neon_bfmlalt, 0),
  NEONMAP1(vbfmmlaq_v, aarch64_neon_bfmmla, 0),
  NEONMAP1(vcadd_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
  NEONMAP1(vcadd_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType),
  NEONMAP1(vcaddq_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
  NEONMAP1(vcaddq_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType),
  NEONMAP1(vcage_v, aarch64_neon_facge, 0),
  NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
  NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
  NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
  NEONMAP1(vcale_v, aarch64_neon_facge, 0),
  NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
  NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
  NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
  NEONMAP0(vceqz_v),
  NEONMAP0(vceqzq_v),
  NEONMAP0(vcgez_v),
  NEONMAP0(vcgezq_v),
  NEONMAP0(vcgtz_v),
  NEONMAP0(vcgtzq_v),
  NEONMAP0(vclez_v),
  NEONMAP0(vclezq_v),
  NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
  NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
  NEONMAP0(vcltz_v),
  NEONMAP0(vcltzq_v),
  NEONMAP1(vclz_v, ctlz, Add1ArgType),
  NEONMAP1(vclzq_v, ctlz, Add1ArgType),
  NEONMAP1(vcmla_rot180_v, aarch64_neon_vcmla_rot180, Add1ArgType),
  NEONMAP1(vcmla_rot270_v, aarch64_neon_vcmla_rot270, Add1ArgType),
  NEONMAP1(vcmla_rot90_v, aarch64_neon_vcmla_rot90, Add1ArgType),
  NEONMAP1(vcmla_v, aarch64_neon_vcmla_rot0, Add1ArgType),
  NEONMAP1(vcmlaq_rot180_v, aarch64_neon_vcmla_rot180, Add1ArgType),
  NEONMAP1(vcmlaq_rot270_v, aarch64_neon_vcmla_rot270, Add1ArgType),
  NEONMAP1(vcmlaq_rot90_v, aarch64_neon_vcmla_rot90, Add1ArgType),
  NEONMAP1(vcmlaq_v, aarch64_neon_vcmla_rot0, Add1ArgType),
  NEONMAP1(vcnt_v, ctpop, Add1ArgType),
  NEONMAP1(vcntq_v, ctpop, Add1ArgType),
  NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
  NEONMAP0(vcvt_f16_v),
  NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
  NEONMAP0(vcvt_f32_v),
  NEONMAP2(vcvt_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvt_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP0(vcvtq_f16_v),
  NEONMAP0(vcvtq_f32_v),
  NEONMAP1(vcvtq_high_bf16_v, aarch64_neon_bfcvtn2, 0),
  NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvtq_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
  NEONMAP2(vdot_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
  NEONMAP2(vdotq_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
  NEONMAP0(vext_v),
  NEONMAP0(vextq_v),
  NEONMAP0(vfma_v),
  NEONMAP0(vfmaq_v),
  NEONMAP1(vfmlal_high_v, aarch64_neon_fmlal2, 0),
  NEONMAP1(vfmlal_low_v, aarch64_neon_fmlal, 0),
  NEONMAP1(vfmlalq_high_v, aarch64_neon_fmlal2, 0),
  NEONMAP1(vfmlalq_low_v, aarch64_neon_fmlal, 0),
  NEONMAP1(vfmlsl_high_v, aarch64_neon_fmlsl2, 0),
  NEONMAP1(vfmlsl_low_v, aarch64_neon_fmlsl, 0),
  NEONMAP1(vfmlslq_high_v, aarch64_neon_fmlsl2, 0),
  NEONMAP1(vfmlslq_low_v, aarch64_neon_fmlsl, 0),
  NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
  NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0),
  NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0),
  NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0),
  NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0),
  NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0),
  NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0),
  NEONMAP2(vmmlaq_v, aarch64_neon_ummla, aarch64_neon_smmla, 0),
  NEONMAP0(vmovl_v),
  NEONMAP0(vmovn_v),
  NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
  NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
  NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
  NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
  NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
  NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
  NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
  NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
  NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
  NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
  NEONMAP1(vqdmulh_lane_v, aarch64_neon_sqdmulh_lane, 0),
  NEONMAP1(vqdmulh_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
  NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
  NEONMAP1(vqdmulhq_lane_v, aarch64_neon_sqdmulh_lane, 0),
  NEONMAP1(vqdmulhq_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
  NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
  NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
  NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
  NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
  NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
  NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0),
  NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
  NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
  NEONMAP1(vqrdmulhq_lane_v, aarch64_neon_sqrdmulh_lane, 0),
  NEONMAP1(vqrdmulhq_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
  NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
  NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
  NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
  NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
  NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
  NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
  NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
  NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
  NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
  NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
  NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
  NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
  NEONMAP0(vrndi_v),
  NEONMAP0(vrndiq_v),
  NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
  NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
  NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
  NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
  NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
  NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
  NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
  NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0),
  NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0),
  NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0),
  NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0),
  NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0),
  NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0),
  NEONMAP0(vshl_n_v),
  NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
  NEONMAP0(vshll_n_v),
  NEONMAP0(vshlq_n_v),
  NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
  NEONMAP0(vshr_n_v),
  NEONMAP0(vshrn_n_v),
  NEONMAP0(vshrq_n_v),
  NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0),
  NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0),
  NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0),
  NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0),
  NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0),
  NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0),
  NEONMAP0(vsubhn_v),
  NEONMAP0(vtst_v),
  NEONMAP0(vtstq_v),
  NEONMAP1(vusdot_v, aarch64_neon_usdot, 0),
  NEONMAP1(vusdotq_v, aarch64_neon_usdot, 0),
  NEONMAP1(vusmmlaq_v, aarch64_neon_usmmla, 0),
};

static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
  NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
  NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
  NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
  NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
  NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
  NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
  NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
  NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
  NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
  NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
  NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
  NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
  NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
  NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
  NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
  NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
  NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
  NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
  NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_s64_f64, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_u64_f64, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_bf16_f32, aarch64_neon_bfcvt, 0),
  NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
  NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
  NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
  NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
  NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
  NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
  NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_s32_f32, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_u32_f32, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
  NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
  NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
  NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
  NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
  NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
  NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
  NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
  NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
  NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
  NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
  NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
  NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
  NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
  NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
  NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
  NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
  NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
  NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
  NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
  NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
  NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
  NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
  NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
  NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
  NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
  NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
  NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
  NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
  NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
  NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
  NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
  NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
  NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
  NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
  NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
  NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
  NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
  NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
  NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
  NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
  NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
  NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
  NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
  NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
  NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
  NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
  NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
  NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
  NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
  NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
  NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
  NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
  NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
  NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
  NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
  NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
  NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
  NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
  NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
  NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
  NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
  NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
  NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
  NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
  NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
  NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
  NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
  NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
  NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
  // FP16 scalar intrinsics go here.
  NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType),
  NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
  NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
  NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
  NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_s32_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_s64_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_u32_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_u64_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
  NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
  NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
  NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
  NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
  NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
  NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
  NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType),
  NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType),
  NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType),
  NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType),
  NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType),
};

#undef NEONMAP0
#undef NEONMAP1
#undef NEONMAP2

#define SVEMAP1(NameBase, LLVMIntrinsic, TypeModifier)                         \
  {                                                                            \
    #NameBase, SVE::BI__builtin_sve_##NameBase, Intrinsic::LLVMIntrinsic, 0,   \
        TypeModifier                                                           \
  }

#define SVEMAP2(NameBase, TypeModifier)                                        \
  { #NameBase, SVE::BI__builtin_sve_##NameBase, 0, 0, TypeModifier }
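
// For illustration only (a hypothetical entry, not one from the generated
// table below): SVEMAP1(svfoo, aarch64_sve_foo, 0) would expand to
//   { "svfoo", SVE::BI__builtin_sve_svfoo, Intrinsic::aarch64_sve_foo, 0, 0 }
// i.e. the name hint, the builtin ID, the LLVM intrinsic, no alternate
// intrinsic, and the type-modifier flags.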
static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = {
#define GET_SVE_LLVM_INTRINSIC_MAP
#include "clang/Basic/arm_sve_builtin_cg.inc"
#undef GET_SVE_LLVM_INTRINSIC_MAP
};

#undef SVEMAP1
#undef SVEMAP2

static bool NEONSIMDIntrinsicsProvenSorted = false;

static bool AArch64SIMDIntrinsicsProvenSorted = false;
static bool AArch64SISDIntrinsicsProvenSorted = false;
static bool AArch64SVEIntrinsicsProvenSorted = false;

static const ARMVectorIntrinsicInfo *
findARMVectorIntrinsicInMap(ArrayRef<ARMVectorIntrinsicInfo> IntrinsicMap,
                            unsigned BuiltinID, bool &MapProvenSorted) {

#ifndef NDEBUG
  if (!MapProvenSorted) {
    assert(llvm::is_sorted(IntrinsicMap));
    MapProvenSorted = true;
  }
#endif

  const ARMVectorIntrinsicInfo *Builtin =
      llvm::lower_bound(IntrinsicMap, BuiltinID);

  if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
    return Builtin;

  return nullptr;
}
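
// Typical use (an illustrative sketch only; the real call sites live in the
// target-specific Emit*BuiltinExpr functions):
//   const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
//       AArch64SIMDIntrinsicMap, BuiltinID,
//       AArch64SIMDIntrinsicsProvenSorted);
//   if (Builtin)
//     ... dispatch on Builtin->LLVMIntrinsic and Builtin->TypeModifier ...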

Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
                                                   unsigned Modifier,
                                                   llvm::Type *ArgType,
                                                   const CallExpr *E) {
  int VectorSize = 0;
  if (Modifier & Use64BitVectors)
    VectorSize = 64;
  else if (Modifier & Use128BitVectors)
    VectorSize = 128;

  // Return type.
  SmallVector<llvm::Type *, 3> Tys;
  if (Modifier & AddRetType) {
    llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
    if (Modifier & VectorizeRetType)
      Ty = llvm::FixedVectorType::get(
          Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);

    Tys.push_back(Ty);
  }

  // Arguments.
  if (Modifier & VectorizeArgTypes) {
    int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
    ArgType = llvm::FixedVectorType::get(ArgType, Elts);
  }

  if (Modifier & (Add1ArgType | Add2ArgTypes))
    Tys.push_back(ArgType);

  if (Modifier & Add2ArgTypes)
    Tys.push_back(ArgType);

  if (Modifier & InventFloatType)
    Tys.push_back(FloatTy);

  return CGM.getIntrinsic(IntrinsicID, Tys);
}
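
// A rough worked example, using the SISD table above: vqmovnh_s16 maps to
// aarch64_neon_sqxtn with VectorRet | Use64BitVectors, so VectorSize is 64,
// the scalar i8 return type is vectorized to <8 x i8>, and the overloaded
// intrinsic becomes @llvm.aarch64.neon.sqxtn.v8i8.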

static Value *EmitCommonNeonSISDBuiltinExpr(
    CodeGenFunction &CGF, const ARMVectorIntrinsicInfo &SISDInfo,
    SmallVectorImpl<Value *> &Ops, const CallExpr *E) {
  unsigned BuiltinID = SISDInfo.BuiltinID;
  unsigned Int = SISDInfo.LLVMIntrinsic;
  unsigned Modifier = SISDInfo.TypeModifier;
  const char *s = SISDInfo.NameHint;

  switch (BuiltinID) {
  case NEON::BI__builtin_neon_vcled_s64:
  case NEON::BI__builtin_neon_vcled_u64:
  case NEON::BI__builtin_neon_vcles_f32:
  case NEON::BI__builtin_neon_vcled_f64:
  case NEON::BI__builtin_neon_vcltd_s64:
  case NEON::BI__builtin_neon_vcltd_u64:
  case NEON::BI__builtin_neon_vclts_f32:
  case NEON::BI__builtin_neon_vcltd_f64:
  case NEON::BI__builtin_neon_vcales_f32:
  case NEON::BI__builtin_neon_vcaled_f64:
  case NEON::BI__builtin_neon_vcalts_f32:
  case NEON::BI__builtin_neon_vcaltd_f64:
    // Only one direction of the comparisons actually exists: cmle is really a
    // cmge with swapped operands. The table gives us the right intrinsic, but
    // we still need to do the swap.
    std::swap(Ops[0], Ops[1]);
    break;
  }

  assert(Int && "Generic code assumes a valid intrinsic");

  // Determine the type(s) of this overloaded AArch64 intrinsic.
  const Expr *Arg = E->getArg(0);
  llvm::Type *ArgTy = CGF.ConvertType(Arg->getType());
  Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E);

  int j = 0;
  ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0);
  for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
       ai != ae; ++ai, ++j) {
    llvm::Type *ArgTy = ai->getType();
    if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
        ArgTy->getPrimitiveSizeInBits())
      continue;

    assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
    // The constant argument to an _n_ intrinsic always has Int32Ty, so
    // truncate it before inserting.
    Ops[j] = CGF.Builder.CreateTruncOrBitCast(
        Ops[j], cast<llvm::VectorType>(ArgTy)->getElementType());
    Ops[j] =
        CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
  }
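  // Example of the promotion above: for vqaddb_s8 (Vectorize1ArgType |
  // Use64BitVectors) each scalar i8 operand is inserted into lane 0 of an
  // undef <8 x i8>, the <8 x i8> intrinsic is called, and the scalar result
  // is extracted back out of lane 0 below.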

  Value *Result = CGF.EmitNeonCall(F, Ops, s);
  llvm::Type *ResultType = CGF.ConvertType(E->getType());
  if (ResultType->getPrimitiveSizeInBits().getFixedSize() <
      Result->getType()->getPrimitiveSizeInBits().getFixedSize())
    return CGF.Builder.CreateExtractElement(Result, C0);

  return CGF.Builder.CreateBitCast(Result, ResultType, s);
}

Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
    unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
    const char *NameHint, unsigned Modifier, const CallExpr *E,
    SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
    llvm::Triple::ArchType Arch) {
  // Get the last argument, which specifies the vector type.
  const Expr *Arg = E->getArg(E->getNumArgs() - 1);
  Optional<llvm::APSInt> NeonTypeConst =
      Arg->getIntegerConstantExpr(getContext());
  if (!NeonTypeConst)
    return nullptr;

  // Determine the type of this overloaded NEON intrinsic.
  NeonTypeFlags Type(NeonTypeConst->getZExtValue());
  bool Usgn = Type.isUnsigned();
  bool Quad = Type.isQuad();
  const bool HasLegalHalfType = getTarget().hasLegalHalfType();
  const bool AllowBFloatArgsAndRet =
      getTargetHooks().getABIInfo().allowBFloatArgsAndRet();

  llvm::FixedVectorType *VTy =
      GetNeonType(this, Type, HasLegalHalfType, false, AllowBFloatArgsAndRet);
  llvm::Type *Ty = VTy;
  if (!Ty)
    return nullptr;

  auto getAlignmentValue32 = [&](Address addr) -> Value* {
    return Builder.getInt32(addr.getAlignment().getQuantity());
  };

  unsigned Int = LLVMIntrinsic;
  if ((Modifier & UnsignedAlts) && !Usgn)
    Int = AltLLVMIntrinsic;

  switch (BuiltinID) {
  default: break;
  case NEON::BI__builtin_neon_splat_lane_v:
  case NEON::BI__builtin_neon_splat_laneq_v:
  case NEON::BI__builtin_neon_splatq_lane_v:
  case NEON::BI__builtin_neon_splatq_laneq_v: {
    auto NumElements = VTy->getElementCount();
    if (BuiltinID == NEON::BI__builtin_neon_splatq_lane_v)
      NumElements = NumElements * 2;
    if (BuiltinID == NEON::BI__builtin_neon_splat_laneq_v)
      NumElements = NumElements.divideCoefficientBy(2);
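    // splatq_lane_v splats a lane of a 64-bit vector across a 128-bit result,
    // so the element count doubles; splat_laneq_v reads a lane of a 128-bit
    // vector into a 64-bit result, so it halves.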

    Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
    return EmitNeonSplat(Ops[0], cast<ConstantInt>(Ops[1]), NumElements);
  }
  case NEON::BI__builtin_neon_vpadd_v:
  case NEON::BI__builtin_neon_vpaddq_v:
    // We don't allow fp/int overloading of intrinsics.
    if (VTy->getElementType()->isFloatingPointTy() &&
        Int == Intrinsic::aarch64_neon_addp)
      Int = Intrinsic::aarch64_neon_faddp;
    break;
  case NEON::BI__builtin_neon_vabs_v:
  case NEON::BI__builtin_neon_vabsq_v:
    if (VTy->getElementType()->isFloatingPointTy())
      return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
  case NEON::BI__builtin_neon_vaddhn_v: {
    llvm::FixedVectorType *SrcTy =
        llvm::FixedVectorType::getExtendedElementVectorType(VTy);

    // %sum = add <4 x i32> %lhs, %rhs
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
    Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");

    // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
    Constant *ShiftAmt =
        ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
    Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");

    // %res = trunc <4 x i32> %high to <4 x i16>
    return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
  }
  case NEON::BI__builtin_neon_vcale_v:
  case NEON::BI__builtin_neon_vcaleq_v:
  case NEON::BI__builtin_neon_vcalt_v:
  case NEON::BI__builtin_neon_vcaltq_v:
    std::swap(Ops[0], Ops[1]);
    LLVM_FALLTHROUGH;
  case NEON::BI__builtin_neon_vcage_v:
  case NEON::BI__builtin_neon_vcageq_v:
  case NEON::BI__builtin_neon_vcagt_v:
  case NEON::BI__builtin_neon_vcagtq_v: {
    llvm::Type *Ty;
    switch (VTy->getScalarSizeInBits()) {
    default: llvm_unreachable("unexpected type");
    case 32:
      Ty = FloatTy;
      break;
    case 64:
      Ty = DoubleTy;
      break;
    case 16:
      Ty = HalfTy;
      break;
    }
    auto *VecFlt = llvm::FixedVectorType::get(Ty, VTy->getNumElements());
    llvm::Type *Tys[] = { VTy, VecFlt };
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    return EmitNeonCall(F, Ops, NameHint);
  }
  case NEON::BI__builtin_neon_vceqz_v:
  case NEON::BI__builtin_neon_vceqzq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
                                         ICmpInst::ICMP_EQ, "vceqz");
  case NEON::BI__builtin_neon_vcgez_v:
  case NEON::BI__builtin_neon_vcgezq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
                                         ICmpInst::ICMP_SGE, "vcgez");
  case NEON::BI__builtin_neon_vclez_v:
  case NEON::BI__builtin_neon_vclezq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
                                         ICmpInst::ICMP_SLE, "vclez");
  case NEON::BI__builtin_neon_vcgtz_v:
  case NEON::BI__builtin_neon_vcgtzq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
                                         ICmpInst::ICMP_SGT, "vcgtz");
  case NEON::BI__builtin_neon_vcltz_v:
  case NEON::BI__builtin_neon_vcltzq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
                                         ICmpInst::ICMP_SLT, "vcltz");
  case NEON::BI__builtin_neon_vclz_v:
  case NEON::BI__builtin_neon_vclzq_v:
    // We generate a target-independent intrinsic, which needs a second
    // argument indicating whether or not clz of zero is undefined; on ARM it
    // isn't.
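    // e.g. vclzq_s32 would become (illustrative IR):
    //   %vclz = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %a, i1 false)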
    Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
    break;
  case NEON::BI__builtin_neon_vcvt_f32_v:
  case NEON::BI__builtin_neon_vcvtq_f32_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad),
                     HasLegalHalfType);
    return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  case NEON::BI__builtin_neon_vcvt_f16_v:
  case NEON::BI__builtin_neon_vcvtq_f16_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad),
                     HasLegalHalfType);
    return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  case NEON::BI__builtin_neon_vcvt_n_f16_v:
  case NEON::BI__builtin_neon_vcvt_n_f32_v:
  case NEON::BI__builtin_neon_vcvt_n_f64_v:
  case NEON::BI__builtin_neon_vcvtq_n_f16_v:
  case NEON::BI__builtin_neon_vcvtq_n_f32_v:
  case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
    llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
    Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case NEON::BI__builtin_neon_vcvt_n_s16_v:
  case NEON::BI__builtin_neon_vcvt_n_s32_v:
  case NEON::BI__builtin_neon_vcvt_n_u16_v:
  case NEON::BI__builtin_neon_vcvt_n_u32_v:
  case NEON::BI__builtin_neon_vcvt_n_s64_v:
  case NEON::BI__builtin_neon_vcvt_n_u64_v:
  case NEON::BI__builtin_neon_vcvtq_n_s16_v:
  case NEON::BI__builtin_neon_vcvtq_n_s32_v:
  case NEON::BI__builtin_neon_vcvtq_n_u16_v:
  case NEON::BI__builtin_neon_vcvtq_n_u32_v:
  case NEON::BI__builtin_neon_vcvtq_n_s64_v:
  case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case NEON::BI__builtin_neon_vcvt_s32_v:
  case NEON::BI__builtin_neon_vcvt_u32_v:
  case NEON::BI__builtin_neon_vcvt_s64_v:
  case NEON::BI__builtin_neon_vcvt_u64_v:
  case NEON::BI__builtin_neon_vcvt_s16_v:
  case NEON::BI__builtin_neon_vcvt_u16_v:
  case NEON::BI__builtin_neon_vcvtq_s32_v:
  case NEON::BI__builtin_neon_vcvtq_u32_v:
  case NEON::BI__builtin_neon_vcvtq_s64_v:
  case NEON::BI__builtin_neon_vcvtq_u64_v:
  case NEON::BI__builtin_neon_vcvtq_s16_v:
  case NEON::BI__builtin_neon_vcvtq_u16_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
    return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
                : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
  }
  case NEON::BI__builtin_neon_vcvta_s16_v:
  case NEON::BI__builtin_neon_vcvta_s32_v:
  case NEON::BI__builtin_neon_vcvta_s64_v:
  case NEON::BI__builtin_neon_vcvta_u16_v:
  case NEON::BI__builtin_neon_vcvta_u32_v:
  case NEON::BI__builtin_neon_vcvta_u64_v:
  case NEON::BI__builtin_neon_vcvtaq_s16_v:
  case NEON::BI__builtin_neon_vcvtaq_s32_v:
  case NEON::BI__builtin_neon_vcvtaq_s64_v:
  case NEON::BI__builtin_neon_vcvtaq_u16_v:
  case NEON::BI__builtin_neon_vcvtaq_u32_v:
  case NEON::BI__builtin_neon_vcvtaq_u64_v:
  case NEON::BI__builtin_neon_vcvtn_s16_v:
  case NEON::BI__builtin_neon_vcvtn_s32_v:
  case NEON::BI__builtin_neon_vcvtn_s64_v:
  case NEON::BI__builtin_neon_vcvtn_u16_v:
  case NEON::BI__builtin_neon_vcvtn_u32_v:
  case NEON::BI__builtin_neon_vcvtn_u64_v:
  case NEON::BI__builtin_neon_vcvtnq_s16_v:
  case NEON::BI__builtin_neon_vcvtnq_s32_v:
  case NEON::BI__builtin_neon_vcvtnq_s64_v:
  case NEON::BI__builtin_neon_vcvtnq_u16_v:
  case NEON::BI__builtin_neon_vcvtnq_u32_v:
  case NEON::BI__builtin_neon_vcvtnq_u64_v:
  case NEON::BI__builtin_neon_vcvtp_s16_v:
  case NEON::BI__builtin_neon_vcvtp_s32_v:
  case NEON::BI__builtin_neon_vcvtp_s64_v:
  case NEON::BI__builtin_neon_vcvtp_u16_v:
  case NEON::BI__builtin_neon_vcvtp_u32_v:
  case NEON::BI__builtin_neon_vcvtp_u64_v:
  case NEON::BI__builtin_neon_vcvtpq_s16_v:
  case NEON::BI__builtin_neon_vcvtpq_s32_v:
  case NEON::BI__builtin_neon_vcvtpq_s64_v:
  case NEON::BI__builtin_neon_vcvtpq_u16_v:
  case NEON::BI__builtin_neon_vcvtpq_u32_v:
  case NEON::BI__builtin_neon_vcvtpq_u64_v:
  case NEON::BI__builtin_neon_vcvtm_s16_v:
  case NEON::BI__builtin_neon_vcvtm_s32_v:
  case NEON::BI__builtin_neon_vcvtm_s64_v:
  case NEON::BI__builtin_neon_vcvtm_u16_v:
  case NEON::BI__builtin_neon_vcvtm_u32_v:
  case NEON::BI__builtin_neon_vcvtm_u64_v:
  case NEON::BI__builtin_neon_vcvtmq_s16_v:
  case NEON::BI__builtin_neon_vcvtmq_s32_v:
  case NEON::BI__builtin_neon_vcvtmq_s64_v:
  case NEON::BI__builtin_neon_vcvtmq_u16_v:
  case NEON::BI__builtin_neon_vcvtmq_u32_v:
  case NEON::BI__builtin_neon_vcvtmq_u64_v: {
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
  }
  case NEON::BI__builtin_neon_vcvtx_f32_v: {
    llvm::Type *Tys[2] = { VTy->getTruncatedElementVectorType(VTy), Ty };
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
  }
  case NEON::BI__builtin_neon_vext_v:
  case NEON::BI__builtin_neon_vextq_v: {
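    // vext concatenates the two inputs and extracts a window of lanes
    // starting at the immediate, e.g. vext(<a0,a1,a2,a3>, <b0,b1,b2,b3>, 1)
    // yields <a1,a2,a3,b0>; this is modelled as a shufflevector below.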
    int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
    SmallVector<int, 16> Indices;
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
      Indices.push_back(i+CV);

    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext");
  }
  case NEON::BI__builtin_neon_vfma_v:
  case NEON::BI__builtin_neon_vfmaq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);

    // The NEON intrinsic puts the accumulator first, unlike LLVM's fma.
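    // e.g. vfmaq_f32(a, b, c) computes a + b * c and is emitted as
    // @llvm.fma.v4f32(%b, %c, %a).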
    return emitCallMaybeConstrainedFPBuiltin(
        *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
        {Ops[1], Ops[2], Ops[0]});
  }
  case NEON::BI__builtin_neon_vld1_v:
  case NEON::BI__builtin_neon_vld1q_v: {
    llvm::Type *Tys[] = {Ty, Int8PtrTy};
    Ops.push_back(getAlignmentValue32(PtrOp0));
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1");
  }
  case NEON::BI__builtin_neon_vld1_x2_v:
  case NEON::BI__builtin_neon_vld1q_x2_v:
  case NEON::BI__builtin_neon_vld1_x3_v:
  case NEON::BI__builtin_neon_vld1q_x3_v:
  case NEON::BI__builtin_neon_vld1_x4_v:
  case NEON::BI__builtin_neon_vld1q_x4_v: {
    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
    llvm::Type *Tys[2] = { VTy, PTy };
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld2_v:
  case NEON::BI__builtin_neon_vld2q_v:
  case NEON::BI__builtin_neon_vld3_v:
  case NEON::BI__builtin_neon_vld3q_v:
  case NEON::BI__builtin_neon_vld4_v:
  case NEON::BI__builtin_neon_vld4q_v:
  case NEON::BI__builtin_neon_vld2_dup_v:
  case NEON::BI__builtin_neon_vld2q_dup_v:
  case NEON::BI__builtin_neon_vld3_dup_v:
  case NEON::BI__builtin_neon_vld3q_dup_v:
  case NEON::BI__builtin_neon_vld4_dup_v:
  case NEON::BI__builtin_neon_vld4q_dup_v: {
    llvm::Type *Tys[] = {Ty, Int8PtrTy};
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    Value *Align = getAlignmentValue32(PtrOp1);
    Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld1_dup_v:
  case NEON::BI__builtin_neon_vld1q_dup_v: {
    Value *V = UndefValue::get(Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    PtrOp0 = Builder.CreateBitCast(PtrOp0, Ty);
    LoadInst *Ld = Builder.CreateLoad(PtrOp0);
    llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
    Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
    return EmitNeonSplat(Ops[0], CI);
  }
  case NEON::BI__builtin_neon_vld2_lane_v:
  case NEON::BI__builtin_neon_vld2q_lane_v:
  case NEON::BI__builtin_neon_vld3_lane_v:
  case NEON::BI__builtin_neon_vld3q_lane_v:
  case NEON::BI__builtin_neon_vld4_lane_v:
  case NEON::BI__builtin_neon_vld4q_lane_v: {
    llvm::Type *Tys[] = {Ty, Int8PtrTy};
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    for (unsigned I = 2; I < Ops.size() - 1; ++I)
      Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
    Ops.push_back(getAlignmentValue32(PtrOp1));
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint);
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vmovl_v: {
    llvm::FixedVectorType *DTy =
        llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
    if (Usgn)
      return Builder.CreateZExt(Ops[0], Ty, "vmovl");
    return Builder.CreateSExt(Ops[0], Ty, "vmovl");
  }
  case NEON::BI__builtin_neon_vmovn_v: {
    llvm::FixedVectorType *QTy =
        llvm::FixedVectorType::getExtendedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
    return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
  }
  case NEON::BI__builtin_neon_vmull_v:
    // FIXME: the integer vmull operations could be emitted in terms of pure
    // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
    // hoisting the exts outside loops. Until a global ISel that can see
    // through such movement comes along, this leads to bad CodeGen. So we
    // need an intrinsic for now.
    Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
    Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
  case NEON::BI__builtin_neon_vpadal_v:
  case NEON::BI__builtin_neon_vpadalq_v: {
    // The source operand type has twice as many elements of half the size.
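    // e.g. a <4 x i16> result accumulates pairs from an <8 x i8> source.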
6568 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
6569 llvm::Type *EltTy =
6570 llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
6571 auto *NarrowTy =
6572 llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
6573 llvm::Type *Tys[2] = { Ty, NarrowTy };
6574 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
6575 }
6576 case NEON::BI__builtin_neon_vpaddl_v:
6577 case NEON::BI__builtin_neon_vpaddlq_v: {
6578 // The source operand type has twice as many elements of half the size.
6579 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
6580 llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
6581 auto *NarrowTy =
6582 llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
6583 llvm::Type *Tys[2] = { Ty, NarrowTy };
6584 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
6585 }
6586 case NEON::BI__builtin_neon_vqdmlal_v:
6587 case NEON::BI__builtin_neon_vqdmlsl_v: {
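    // Split the operation in two: first emit the saturating doubling
    // multiply (LLVMIntrinsic) on operands 1 and 2, then combine it with
    // the accumulator via the saturating add/sub (AltLLVMIntrinsic).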
6588 SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
6589 Ops[1] =
6590 EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal");
6591 Ops.resize(2);
6592 return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint);
6593 }
6594 case NEON::BI__builtin_neon_vqdmulhq_lane_v:
6595 case NEON::BI__builtin_neon_vqdmulh_lane_v:
6596 case NEON::BI__builtin_neon_vqrdmulhq_lane_v:
6597 case NEON::BI__builtin_neon_vqrdmulh_lane_v: {
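    // For the q forms, the type encoded by the builtin's last argument is
    // the 64-bit lane-operand type, so the 128-bit return type has twice
    // as many elements; the second overload type is always the 64-bit
    // vector.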
6598 auto *RTy = cast<llvm::FixedVectorType>(Ty);
6599 if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v ||
6600 BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v)
6601 RTy = llvm::FixedVectorType::get(RTy->getElementType(),
6602 RTy->getNumElements() * 2);
6603 llvm::Type *Tys[2] = {
6604 RTy, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
6605 /*isQuad*/ false))};
6606 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
6607 }
6608 case NEON::BI__builtin_neon_vqdmulhq_laneq_v:
6609 case NEON::BI__builtin_neon_vqdmulh_laneq_v:
6610 case NEON::BI__builtin_neon_vqrdmulhq_laneq_v:
6611 case NEON::BI__builtin_neon_vqrdmulh_laneq_v: {
6612 llvm::Type *Tys[2] = {
6613 Ty, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
6614 /*isQuad*/ true))};
6615 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
6616 }
6617 case NEON::BI__builtin_neon_vqshl_n_v:
6618 case NEON::BI__builtin_neon_vqshlq_n_v:
6619 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
6620 1, false);
6621 case NEON::BI__builtin_neon_vqshlu_n_v:
6622 case NEON::BI__builtin_neon_vqshluq_n_v:
6623 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
6624 1, false);
6625 case NEON::BI__builtin_neon_vrecpe_v:
6626 case NEON::BI__builtin_neon_vrecpeq_v:
6627 case NEON::BI__builtin_neon_vrsqrte_v:
6628 case NEON::BI__builtin_neon_vrsqrteq_v:
6629 Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
6630 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
6631 case NEON::BI__builtin_neon_vrndi_v:
6632 case NEON::BI__builtin_neon_vrndiq_v:
6633 Int = Builder.getIsFPConstrained()
6634 ? Intrinsic::experimental_constrained_nearbyint
6635 : Intrinsic::nearbyint;
6636 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
6637 case NEON::BI__builtin_neon_vrshr_n_v:
6638 case NEON::BI__builtin_neon_vrshrq_n_v:
6639 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
6640 1, true);
6641 case NEON::BI__builtin_neon_vshl_n_v:
6642 case NEON::BI__builtin_neon_vshlq_n_v:
6643 Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
6644 return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
6645 "vshl_n");
6646 case NEON::BI__builtin_neon_vshll_n_v: {
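    // Widen first, then shift left, e.g. for vshll_n_s16:
    //   %ext = sext <4 x i16> %a to <4 x i32>
    //   %res = shl <4 x i32> %ext, <i32 n, i32 n, i32 n, i32 n>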
6647 llvm::FixedVectorType *SrcTy =
6648 llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
6649 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6650 if (Usgn)
6651 Ops[0] = Builder.CreateZExt(Ops[0], VTy);
6652 else
6653 Ops[0] = Builder.CreateSExt(Ops[0], VTy);
6654 Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
6655 return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
6656 }
6657 case NEON::BI__builtin_neon_vshrn_n_v: {
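    // Shift right in the wide type, then truncate to the narrow result:
    //   %shr = lshr/ashr <4 x i32> %a, <i32 n, i32 n, i32 n, i32 n>
    //   %res = trunc <4 x i32> %shr to <4 x i16>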
6658 llvm::FixedVectorType *SrcTy =
6659 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
6660 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6661 Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
6662 if (Usgn)
6663 Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
6664 else
6665 Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
6666 return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
6667 }
6668 case NEON::BI__builtin_neon_vshr_n_v:
6669 case NEON::BI__builtin_neon_vshrq_n_v:
6670 return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
6671 case NEON::BI__builtin_neon_vst1_v:
6672 case NEON::BI__builtin_neon_vst1q_v:
6673 case NEON::BI__builtin_neon_vst2_v:
6674 case NEON::BI__builtin_neon_vst2q_v:
6675 case NEON::BI__builtin_neon_vst3_v:
6676 case NEON::BI__builtin_neon_vst3q_v:
6677 case NEON::BI__builtin_neon_vst4_v:
6678 case NEON::BI__builtin_neon_vst4q_v:
6679 case NEON::BI__builtin_neon_vst2_lane_v:
6680 case NEON::BI__builtin_neon_vst2q_lane_v:
6681 case NEON::BI__builtin_neon_vst3_lane_v:
6682 case NEON::BI__builtin_neon_vst3q_lane_v:
6683 case NEON::BI__builtin_neon_vst4_lane_v:
6684 case NEON::BI__builtin_neon_vst4q_lane_v: {
6685 llvm::Type *Tys[] = {Int8PtrTy, Ty};
6686 Ops.push_back(getAlignmentValue32(PtrOp0));
6687 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
6688 }
6689 case NEON::BI__builtin_neon_vst1_x2_v:
6690 case NEON::BI__builtin_neon_vst1q_x2_v:
6691 case NEON::BI__builtin_neon_vst1_x3_v:
6692 case NEON::BI__builtin_neon_vst1q_x3_v:
6693 case NEON::BI__builtin_neon_vst1_x4_v:
6694 case NEON::BI__builtin_neon_vst1q_x4_v: {
6695 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
6696    // TODO: Currently in AArch32 mode the pointer operand comes first, whereas
6697    // in AArch64 it comes last. We may want to standardize on one or the other.
6698 if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
6699 Arch == llvm::Triple::aarch64_32) {
6700 llvm::Type *Tys[2] = { VTy, PTy };
6701 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
6702 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
6703 }
6704 llvm::Type *Tys[2] = { PTy, VTy };
6705 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
6706 }
6707 case NEON::BI__builtin_neon_vsubhn_v: {
6708 llvm::FixedVectorType *SrcTy =
6709 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
6710
6711    // %diff = sub <4 x i32> %lhs, %rhs
6712 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6713 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
6714 Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
6715
6716    // %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
6717 Constant *ShiftAmt =
6718 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
6719 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
6720
6721 // %res = trunc <4 x i32> %high to <4 x i16>
6722 return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
6723 }
6724 case NEON::BI__builtin_neon_vtrn_v:
6725 case NEON::BI__builtin_neon_vtrnq_v: {
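    // vtrn writes two result vectors through the pointer in Ops[0]. For
    // each half vi, the mask picks lane i+vi of the first input followed
    // by lane i+vi of the second (for even i): a 2x2 lane transpose.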
6726 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
6727 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6728 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
6729 Value *SV = nullptr;
6730
6731 for (unsigned vi = 0; vi != 2; ++vi) {
6732 SmallVector<int, 16> Indices;
6733 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
6734 Indices.push_back(i+vi);
6735 Indices.push_back(i+e+vi);
6736 }
6737 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
6738 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
6739 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
6740 }
6741 return SV;
6742 }
6743 case NEON::BI__builtin_neon_vtst_v:
6744 case NEON::BI__builtin_neon_vtstq_v: {
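    // vtst: AND the operands, compare against zero, and sign-extend the
    // i1 lanes back to the element type, yielding all-ones for lanes with
    // any common set bit and all-zeros otherwise.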
6745 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6746 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6747 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
6748 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
6749 ConstantAggregateZero::get(Ty));
6750 return Builder.CreateSExt(Ops[0], Ty, "vtst");
6751 }
6752 case NEON::BI__builtin_neon_vuzp_v:
6753 case NEON::BI__builtin_neon_vuzpq_v: {
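    // vuzp de-interleaves: half vi selects lanes vi, vi+2, vi+4, ... from
    // the concatenation of the two inputs, so the first result holds the
    // even-numbered lanes and the second the odd-numbered ones.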
6754 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
6755 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6756 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
6757 Value *SV = nullptr;
6758
6759 for (unsigned vi = 0; vi != 2; ++vi) {
6760 SmallVector<int, 16> Indices;
6761 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
6762 Indices.push_back(2*i+vi);
6763
6764 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
6765 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
6766 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
6767 }
6768 return SV;
6769 }
6770 case NEON::BI__builtin_neon_vzip_v:
6771 case NEON::BI__builtin_neon_vzipq_v: {
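    // vzip interleaves: half vi alternates lanes of the two inputs
    // starting at lane vi*e/2 of each, so the first result zips the low
    // halves and the second zips the high halves.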
6772 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
6773 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6774 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
6775 Value *SV = nullptr;
6776
6777 for (unsigned vi = 0; vi != 2; ++vi) {
6778 SmallVector<int, 16> Indices;
6779 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
6780 Indices.push_back((i + vi*e) >> 1);
6781 Indices.push_back(((i + vi*e) >> 1)+e);
6782 }
6783 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
6784 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
6785 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
6786 }
6787 return SV;
6788 }
6789 case NEON::BI__builtin_neon_vdot_v:
6790 case NEON::BI__builtin_neon_vdotq_v: {
6791 auto *InputTy =
6792 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6793 llvm::Type *Tys[2] = { Ty, InputTy };
6794 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
6795 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
6796 }
6797 case NEON::BI__builtin_neon_vfmlal_low_v:
6798 case NEON::BI__builtin_neon_vfmlalq_low_v: {
6799 auto *InputTy =
6800 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6801 llvm::Type *Tys[2] = { Ty, InputTy };
6802 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low");
6803 }
6804 case NEON::BI__builtin_neon_vfmlsl_low_v:
6805 case NEON::BI__builtin_neon_vfmlslq_low_v: {
6806 auto *InputTy =
6807 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6808 llvm::Type *Tys[2] = { Ty, InputTy };
6809 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low");
6810 }
6811 case NEON::BI__builtin_neon_vfmlal_high_v:
6812 case NEON::BI__builtin_neon_vfmlalq_high_v: {
6813 auto *InputTy =
6814 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6815 llvm::Type *Tys[2] = { Ty, InputTy };
6816 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high");
6817 }
6818 case NEON::BI__builtin_neon_vfmlsl_high_v:
6819 case NEON::BI__builtin_neon_vfmlslq_high_v: {
6820 auto *InputTy =
6821 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6822 llvm::Type *Tys[2] = { Ty, InputTy };
6823 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high");
6824 }
6825 case NEON::BI__builtin_neon_vmmlaq_v: {
6826 auto *InputTy =
6827 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6828 llvm::Type *Tys[2] = { Ty, InputTy };
6829 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
6830 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmmla");
6831 }
6832 case NEON::BI__builtin_neon_vusmmlaq_v: {
6833 auto *InputTy =
6834 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6835 llvm::Type *Tys[2] = { Ty, InputTy };
6836 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusmmla");
6837 }
6838 case NEON::BI__builtin_neon_vusdot_v:
6839 case NEON::BI__builtin_neon_vusdotq_v: {
6840 auto *InputTy =
6841 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6842 llvm::Type *Tys[2] = { Ty, InputTy };
6843 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusdot");
6844 }
6845 case NEON::BI__builtin_neon_vbfdot_v:
6846 case NEON::BI__builtin_neon_vbfdotq_v: {
6847 llvm::Type *InputTy =
6848 llvm::FixedVectorType::get(BFloatTy, Ty->getPrimitiveSizeInBits() / 16);
6849 llvm::Type *Tys[2] = { Ty, InputTy };
6850 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfdot");
6851 }
6852 case NEON::BI__builtin_neon___a32_vcvt_bf16_v: {
6853 llvm::Type *Tys[1] = { Ty };
6854 Function *F = CGM.getIntrinsic(Int, Tys);
6855 return EmitNeonCall(F, Ops, "vcvtfp2bf");
6856 }
6857
6858 }
6859
6860 assert(Int && "Expected valid intrinsic number");
6861
6862  // Determine the type(s) of this overloaded NEON intrinsic.
6863 Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);
6864
6865 Value *Result = EmitNeonCall(F, Ops, NameHint);
6866 llvm::Type *ResultType = ConvertType(E->getType());
6867  // Cast the one-element vector result of an AArch64 intrinsic to the
6868  // scalar type expected by the builtin.
6869 return Builder.CreateBitCast(Result, ResultType, NameHint);
6870}
6871
6872Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
6873 Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
6874 const CmpInst::Predicate Ip, const Twine &Name) {
6875 llvm::Type *OTy = Op->getType();
6876
6877 // FIXME: this is utterly horrific. We should not be looking at previous
6878 // codegen context to find out what needs doing. Unfortunately TableGen
6879 // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32
6880 // (etc).
6881 if (BitCastInst *BI = dyn_cast<BitCastInst>(Op))
6882 OTy = BI->getOperand(0)->getType();
6883
6884 Op = Builder.CreateBitCast(Op, OTy);
6885 if (OTy->getScalarType()->isFloatingPointTy()) {
6886 Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
6887 } else {
6888 Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
6889 }
6890 return Builder.CreateSExt(Op, Ty, Name);
6891}
6892
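// Helper for AArch64 vtbl/vtbx codegen: concatenates the 64-bit table
// registers pairwise into 128-bit vectors (zero-filling an odd trailing
// register), appends the index operand, and emits the given TBL/TBX
// intrinsic.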
6893static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
6894 Value *ExtOp, Value *IndexOp,
6895 llvm::Type *ResTy, unsigned IntID,
6896 const char *Name) {
6897 SmallVector<Value *, 2> TblOps;
6898 if (ExtOp)
6899 TblOps.push_back(ExtOp);
6900
6901  // Build a vector containing sequential numbers like (0, 1, 2, ..., 15).
6902 SmallVector<int, 16> Indices;
6903 auto *TblTy = cast<llvm::FixedVectorType>(Ops[0]->getType());
6904 for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
6905 Indices.push_back(2*i);
6906 Indices.push_back(2*i+1);
6907 }
6908
6909 int PairPos = 0, End = Ops.size() - 1;
6910 while (PairPos < End) {
6911 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
6912 Ops[PairPos+1], Indices,
6913 Name));
6914 PairPos += 2;
6915 }
6916
6917  // If there's an odd number of 64-bit lookup tables, fill the high 64 bits
6918  // of the final 128-bit lookup table with zeroes.
6919 if (PairPos == End) {
6920 Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
6921 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
6922 ZeroTbl, Indices, Name));
6923 }
6924
6925 Function *TblF;
6926 TblOps.push_back(IndexOp);
6927 TblF = CGF.CGM.getIntrinsic(IntID, ResTy);
6928
6929 return CGF.EmitNeonCall(TblF, TblOps, Name);
6930}
6931
6932Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
6933 unsigned Value;
6934 switch (BuiltinID) {
6935 default:
6936 return nullptr;
6937 case ARM::BI__builtin_arm_nop:
6938 Value = 0;
6939 break;
6940 case ARM::BI__builtin_arm_yield:
6941 case ARM::BI__yield:
6942 Value = 1;
6943 break;
6944 case ARM::BI__builtin_arm_wfe:
6945 case ARM::BI__wfe:
6946 Value = 2;
6947 break;
6948 case ARM::BI__builtin_arm_wfi:
6949 case ARM::BI__wfi:
6950 Value = 3;
6951 break;
6952 case ARM::BI__builtin_arm_sev:
6953 case ARM::BI__sev:
6954 Value = 4;
6955 break;
6956 case ARM::BI__builtin_arm_sevl:
6957 case ARM::BI__sevl:
6958 Value = 5;
6959 break;
6960 }
6961
6962 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
6963 llvm::ConstantInt::get(Int32Ty, Value));
6964}
6965
6966enum SpecialRegisterAccessKind {
6967 NormalRead,
6968 VolatileRead,
6969 Write,
6970};
6971
6972// Generates the IR for the read/write special register builtin.
6973// ValueType is the type of the value that is to be written or read,
6974// RegisterType is the type of the register being written to or read from.
6975static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
6976 const CallExpr *E,
6977 llvm::Type *RegisterType,
6978 llvm::Type *ValueType,
6979 SpecialRegisterAccessKind AccessKind,
6980 StringRef SysReg = "") {
6981  // The read/write register intrinsics only support 32- and 64-bit operations.
6982 assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64))
6983 && "Unsupported size for register.");
6984
6985 CodeGen::CGBuilderTy &Builder = CGF.Builder;
6986 CodeGen::CodeGenModule &CGM = CGF.CGM;
6987 LLVMContext &Context = CGM.getLLVMContext();
6988
6989 if (SysReg.empty()) {
6990 const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
6991 SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString();
6992 }
6993
6994 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
6995 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
6996 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
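  // The register is identified by name metadata, producing IR like
  //   call i64 @llvm.read_register.i64(metadata !0)
  // where !0 holds the register name string.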
6997
6998 llvm::Type *Types[] = { RegisterType };
6999
7000 bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32);
7001 assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
7002 && "Can't fit 64-bit value in 32-bit register");
7003
7004 if (AccessKind != Write) {
7005 assert(AccessKind == NormalRead || AccessKind == VolatileRead);
7006 llvm::Function *F = CGM.getIntrinsic(
7007 AccessKind == VolatileRead ? llvm::Intrinsic::read_volatile_register
7008 : llvm::Intrinsic::read_register,
7009 Types);
7010 llvm::Value *Call = Builder.CreateCall(F, Metadata);
7011
7012 if (MixedTypes)
7013      // Read into a 64-bit register, then truncate the result to 32 bits.
7014 return Builder.CreateTrunc(Call, ValueType);
7015
7016 if (ValueType->isPointerTy())
7017 // Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*).
7018 return Builder.CreateIntToPtr(Call, ValueType);
7019
7020 return Call;
7021 }
7022
7023 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
7024 llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1));
7025 if (MixedTypes) {
7026    // Extend the 32-bit write value to 64 bits to pass to the write intrinsic.
7027 ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
7028 return Builder.CreateCall(F, { Metadata, ArgValue });
7029 }
7030
7031 if (ValueType->isPointerTy()) {
7032 // Have VoidPtrTy ArgValue but want to return an i32/i64.
7033 ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType);
7034 return Builder.CreateCall(F, { Metadata, ArgValue });
7035 }
7036
7037 return Builder.CreateCall(F, { Metadata, ArgValue });
7038}
7039
7040/// Return true if BuiltinID is an overloaded Neon intrinsic with an extra
7041/// argument that specifies the vector type.
7042static bool HasExtraNeonArgument(unsigned BuiltinID) {
7043 switch (BuiltinID) {
7044 default: break;
7045 case NEON::BI__builtin_neon_vget_lane_i8:
7046 case NEON::BI__builtin_neon_vget_lane_i16:
7047 case NEON::BI__builtin_neon_vget_lane_bf16:
7048 case NEON::BI__builtin_neon_vget_lane_i32:
7049 case NEON::BI__builtin_neon_vget_lane_i64:
7050 case NEON::BI__builtin_neon_vget_lane_f32:
7051 case NEON::BI__builtin_neon_vgetq_lane_i8:
7052 case NEON::BI__builtin_neon_vgetq_lane_i16:
7053 case NEON::BI__builtin_neon_vgetq_lane_bf16:
7054 case NEON::BI__builtin_neon_vgetq_lane_i32:
7055 case NEON::BI__builtin_neon_vgetq_lane_i64:
7056 case NEON::BI__builtin_neon_vgetq_lane_f32:
7057 case NEON::BI__builtin_neon_vduph_lane_bf16:
7058 case NEON::BI__builtin_neon_vduph_laneq_bf16:
7059 case NEON::BI__builtin_neon_vset_lane_i8:
7060 case NEON::BI__builtin_neon_vset_lane_i16:
7061 case NEON::BI__builtin_neon_vset_lane_bf16:
7062 case NEON::BI__builtin_neon_vset_lane_i32:
7063 case NEON::BI__builtin_neon_vset_lane_i64:
7064 case NEON::BI__builtin_neon_vset_lane_f32:
7065 case NEON::BI__builtin_neon_vsetq_lane_i8:
7066 case NEON::BI__builtin_neon_vsetq_lane_i16:
7067 case NEON::BI__builtin_neon_vsetq_lane_bf16:
7068 case NEON::BI__builtin_neon_vsetq_lane_i32:
7069 case NEON::BI__builtin_neon_vsetq_lane_i64:
7070 case NEON::BI__builtin_neon_vsetq_lane_f32:
7071 case NEON::BI__builtin_neon_vsha1h_u32:
7072 case NEON::BI__builtin_neon_vsha1cq_u32:
7073 case NEON::BI__builtin_neon_vsha1pq_u32:
7074 case NEON::BI__builtin_neon_vsha1mq_u32:
7075 case NEON::BI__builtin_neon_vcvth_bf16_f32:
7076 case clang::ARM::BI_MoveToCoprocessor:
7077 case clang::ARM::BI_MoveToCoprocessor2:
7078 return false;
7079 }
7080 return true;
7081}
7082
7083Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
7084 const CallExpr *E,
7085 ReturnValueSlot ReturnValue,
7086 llvm::Triple::ArchType Arch) {
7087 if (auto Hint = GetValueForARMHint(BuiltinID))
7088 return Hint;
7089
7090 if (BuiltinID == ARM::BI__emit) {
7091 bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
7092 llvm::FunctionType *FTy =
7093 llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
7094
7095 Expr::EvalResult Result;
7096 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
7097 llvm_unreachable("Sema will ensure that the parameter is constant");
7098
7099 llvm::APSInt Value = Result.Val.getInt();
7100 uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
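    // Thumb uses the 16-bit ".inst.n" encoding, ARM the 32-bit ".inst".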
7101
7102 llvm::InlineAsm *Emit =
7103 IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
7104 /*hasSideEffects=*/true)
7105 : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
7106 /*hasSideEffects=*/true);
7107
7108 return Builder.CreateCall(Emit);
7109 }
7110
7111 if (BuiltinID == ARM::BI__builtin_arm_dbg) {
7112 Value *Option = EmitScalarExpr(E->getArg(0));
7113 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
7114 }
7115
7116 if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
7117 Value *Address = EmitScalarExpr(E->getArg(0));
7118 Value *RW = EmitScalarExpr(E->getArg(1));
7119 Value *IsData = EmitScalarExpr(E->getArg(2));
7120
7121    // Locality is not supported on the ARM target; use the maximum (3).
7122 Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
7123
7124 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
7125 return Builder.CreateCall(F, {Address, RW, Locality, IsData});
7126 }
7127
7128 if (BuiltinID == ARM::BI__builtin_arm_rbit) {
7129 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7130 return Builder.CreateCall(
7131 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
7132 }
7133
7134 if (BuiltinID == ARM::BI__builtin_arm_cls) {
7135 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7136 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls), Arg, "cls");
7137 }
7138 if (BuiltinID == ARM::BI__builtin_arm_cls64) {
7139 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7140 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls64), Arg,
7141 "cls");
7142 }
7143
7144 if (BuiltinID == ARM::BI__clear_cache) {
7145 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
7146 const FunctionDecl *FD = E->getDirectCallee();
7147 Value *Ops[2];
7148 for (unsigned i = 0; i < 2; i++)
7149 Ops[i] = EmitScalarExpr(E->getArg(i));
7150 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
7151 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
7152 StringRef Name = FD->getName();
7153 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
7154 }
7155
7156 if (BuiltinID == ARM::BI__builtin_arm_mcrr ||
7157 BuiltinID == ARM::BI__builtin_arm_mcrr2) {
7158 Function *F;
7159
7160 switch (BuiltinID) {
7161 default: llvm_unreachable("unexpected builtin");
7162 case ARM::BI__builtin_arm_mcrr:
7163 F = CGM.getIntrinsic(Intrinsic::arm_mcrr);
7164 break;
7165 case ARM::BI__builtin_arm_mcrr2:
7166 F = CGM.getIntrinsic(Intrinsic::arm_mcrr2);
7167 break;
7168 }
7169
7170    // The MCRR{2} instruction has 5 operands, but the intrinsic has only 4:
7171    // Rt and Rt2 are represented as a single unsigned 64-bit integer in the
7172    // intrinsic definition, although internally they are two 32-bit
7173    // integers.
7176
7177 Value *Coproc = EmitScalarExpr(E->getArg(0));
7178 Value *Opc1 = EmitScalarExpr(E->getArg(1));
7179 Value *RtAndRt2 = EmitScalarExpr(E->getArg(2));
7180 Value *CRm = EmitScalarExpr(E->getArg(3));
7181
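    // Split the 64-bit value: Rt gets the low 32 bits, Rt2 the high 32.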
7182 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
7183 Value *Rt = Builder.CreateTruncOrBitCast(RtAndRt2, Int32Ty);
7184 Value *Rt2 = Builder.CreateLShr(RtAndRt2, C1);
7185 Rt2 = Builder.CreateTruncOrBitCast(Rt2, Int32Ty);
7186
7187 return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm});
7188 }
7189
7190 if (BuiltinID == ARM::BI__builtin_arm_mrrc ||
7191 BuiltinID == ARM::BI__builtin_arm_mrrc2) {
7192 Function *F;
7193
7194 switch (BuiltinID) {
7195 default: llvm_unreachable("unexpected builtin");
7196 case ARM::BI__builtin_arm_mrrc:
7197 F = CGM.getIntrinsic(Intrinsic::arm_mrrc);
7198 break;
7199 case ARM::BI__builtin_arm_mrrc2:
7200 F = CGM.getIntrinsic(Intrinsic::arm_mrrc2);
7201 break;
7202 }
7203
7204 Value *Coproc = EmitScalarExpr(E->getArg(0));
7205 Value *Opc1 = EmitScalarExpr(E->getArg(1));
7206 Value *CRm = EmitScalarExpr(E->getArg(2));
7207 Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm});
7208
7209    // The intrinsic returns an unsigned 64-bit integer, represented as
7210    // two 32-bit integers.
7211
7212 Value *Rt = Builder.CreateExtractValue(RtAndRt2, 1);
7213 Value *Rt1 = Builder.CreateExtractValue(RtAndRt2, 0);
7214 Rt = Builder.CreateZExt(Rt, Int64Ty);
7215 Rt1 = Builder.CreateZExt(Rt1, Int64Ty);
7216
7217 Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32);
7218 RtAndRt2 = Builder.CreateShl(Rt, ShiftCast, "shl", true);
7219 RtAndRt2 = Builder.CreateOr(RtAndRt2, Rt1);
7220
7221 return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType()));
7222 }
7223
7224 if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
7225 ((BuiltinID == ARM::BI__builtin_arm_ldrex ||
7226 BuiltinID == ARM::BI__builtin_arm_ldaex) &&
7227 getContext().getTypeSize(E->getType()) == 64) ||
7228 BuiltinID == ARM::BI__ldrexd) {
7229 Function *F;
7230
7231 switch (BuiltinID) {
7232 default: llvm_unreachable("unexpected builtin");
7233 case ARM::BI__builtin_arm_ldaex:
7234 F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
7235 break;
7236 case ARM::BI__builtin_arm_ldrexd:
7237 case ARM::BI__builtin_arm_ldrex:
7238 case ARM::BI__ldrexd:
7239 F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
7240 break;
7241 }
7242
7243 Value *LdPtr = EmitScalarExpr(E->getArg(0));
7244 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
7245 "ldrexd");
7246
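    // Reassemble the 64-bit result from the two 32-bit halves of the
    // returned struct (element 1 forms the high word).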
7247 Value *Val0 = Builder.CreateExtractValue(Val, 1);
7248 Value *Val1 = Builder.CreateExtractValue(Val, 0);
7249 Val0 = Builder.CreateZExt(Val0, Int64Ty);
7250 Val1 = Builder.CreateZExt(Val1, Int64Ty);
7251
7252 Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
7253 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
7254 Val = Builder.CreateOr(Val, Val1);
7255 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
7256 }
7257
7258 if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
7259 BuiltinID == ARM::BI__builtin_arm_ldaex) {
7260 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
7261
7262 QualType Ty = E->getType();
7263 llvm::Type *RealResTy = ConvertType(Ty);
7264 llvm::Type *PtrTy = llvm::IntegerType::get(
7265 getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
7266 LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
7267
7268 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex
7269 ? Intrinsic::arm_ldaex
7270 : Intrinsic::arm_ldrex,
7271 PtrTy);
7272 Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
7273
7274 if (RealResTy->isPointerTy())
7275 return Builder.CreateIntToPtr(Val, RealResTy);
7276 else {
7277 llvm::Type *IntResTy = llvm::IntegerType::get(
7278 getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
7279 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
7280 return Builder.CreateBitCast(Val, RealResTy);
7281 }
7282 }
7283
7284 if (BuiltinID == ARM::BI__builtin_arm_strexd ||
7285 ((BuiltinID == ARM::BI__builtin_arm_stlex ||
7286 BuiltinID == ARM::BI__builtin_arm_strex) &&
7287 getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
7288 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
7289 ? Intrinsic::arm_stlexd
7290 : Intrinsic::arm_strexd);
7291 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty);
7292
7293 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
7294 Value *Val = EmitScalarExpr(E->getArg(0));
7295 Builder.CreateStore(Val, Tmp);
7296
7297 Address LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
7298 Val = Builder.CreateLoad(LdPtr);
7299
7300 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
7301 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
7302 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
7303 return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
7304 }
7305
7306 if (BuiltinID == ARM::BI__builtin_arm_strex ||
7307 BuiltinID == ARM::BI__builtin_arm_stlex) {
7308 Value *StoreVal = EmitScalarExpr(E->getArg(0));
7309 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
7310
7311 QualType Ty = E->getArg(0)->getType();
7312 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
7313 getContext().getTypeSize(Ty));
7314 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
7315
7316 if (StoreVal->getType()->isPointerTy())
7317 StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
7318 else {
7319 llvm::Type *IntTy = llvm::IntegerType::get(
7320 getLLVMContext(),
7321 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
7322 StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
7323 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
7324 }
7325
7326 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
7327 ? Intrinsic::arm_stlex
7328 : Intrinsic::arm_strex,
7329 StoreAddr->getType());
7330 return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
7331 }
7332
7333 if (BuiltinID == ARM::BI__builtin_arm_clrex) {
7334 Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
7335 return Builder.CreateCall(F);
7336 }
7337
7338 // CRC32
7339 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
7340 switch (BuiltinID) {
7341 case ARM::BI__builtin_arm_crc32b:
7342 CRCIntrinsicID = Intrinsic::arm_crc32b; break;
7343 case ARM::BI__builtin_arm_crc32cb:
7344 CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
7345 case ARM::BI__builtin_arm_crc32h:
7346 CRCIntrinsicID = Intrinsic::arm_crc32h; break;
7347 case ARM::BI__builtin_arm_crc32ch:
7348 CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
7349 case ARM::BI__builtin_arm_crc32w:
7350 case ARM::BI__builtin_arm_crc32d:
7351 CRCIntrinsicID = Intrinsic::arm_crc32w; break;
7352 case ARM::BI__builtin_arm_crc32cw:
7353 case ARM::BI__builtin_arm_crc32cd:
7354 CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
7355 }
7356
7357 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
7358 Value *Arg0 = EmitScalarExpr(E->getArg(0));
7359 Value *Arg1 = EmitScalarExpr(E->getArg(1));
7360
7361    // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
7362    // intrinsics, hence we need different codegen for these cases.
7363 if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
7364 BuiltinID == ARM::BI__builtin_arm_crc32cd) {
7365 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
7366 Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
7367 Value *Arg1b = Builder.CreateLShr(Arg1, C1);
7368 Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);
7369
7370 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
7371 Value *Res = Builder.CreateCall(F, {Arg0, Arg1a});
7372 return Builder.CreateCall(F, {Res, Arg1b});
7373 } else {
7374 Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
7375
7376 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
7377 return Builder.CreateCall(F, {Arg0, Arg1});
7378 }
7379 }
7380
7381 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
7382 BuiltinID == ARM::BI__builtin_arm_rsr64 ||
7383 BuiltinID == ARM::BI__builtin_arm_rsrp ||
7384 BuiltinID == ARM::BI__builtin_arm_wsr ||
7385 BuiltinID == ARM::BI__builtin_arm_wsr64 ||
7386 BuiltinID == ARM::BI__builtin_arm_wsrp) {
7387
7388 SpecialRegisterAccessKind AccessKind = Write;
7389 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
7390 BuiltinID == ARM::BI__builtin_arm_rsr64 ||
7391 BuiltinID == ARM::BI__builtin_arm_rsrp)
7392 AccessKind = VolatileRead;
7393
7394 bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp ||
7395 BuiltinID == ARM::BI__builtin_arm_wsrp;
7396
7397 bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
7398 BuiltinID == ARM::BI__builtin_arm_wsr64;
7399
7400 llvm::Type *ValueType;
7401 llvm::Type *RegisterType;
7402 if (IsPointerBuiltin) {
7403 ValueType = VoidPtrTy;
7404 RegisterType = Int32Ty;
7405 } else if (Is64Bit) {
7406 ValueType = RegisterType = Int64Ty;
7407 } else {
7408 ValueType = RegisterType = Int32Ty;
7409 }
7410
7411 return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
7412 AccessKind);
7413 }
7414
7415 // Handle MSVC intrinsics before argument evaluation to prevent double
7416 // evaluation.
7417 if (Optional<MSVCIntrin> MsvcIntId = translateArmToMsvcIntrin(BuiltinID))
7418 return EmitMSVCBuiltinExpr(*MsvcIntId, E);
7419
7420 // Deal with MVE builtins
7421 if (Value *Result = EmitARMMVEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
7422 return Result;
7423 // Handle CDE builtins
7424 if (Value *Result = EmitARMCDEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
7425 return Result;
7426
7427 // Find out if any arguments are required to be integer constant
7428 // expressions.
7429 unsigned ICEArguments = 0;
7430 ASTContext::GetBuiltinTypeError Error;
7431 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
7432 assert(Error == ASTContext::GE_None && "Should not codegen an error");
7433
7434 auto getAlignmentValue32 = [&](Address addr) -> Value* {
7435 return Builder.getInt32(addr.getAlignment().getQuantity());
7436 };
7437
7438 Address PtrOp0 = Address::invalid();
7439 Address PtrOp1 = Address::invalid();
7440 SmallVector<Value*, 4> Ops;
7441 bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
7442 unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
7443 for (unsigned i = 0, e = NumArgs; i != e; i++) {
7444 if (i == 0) {
7445 switch (BuiltinID) {
7446 case NEON::BI__builtin_neon_vld1_v:
7447 case NEON::BI__builtin_neon_vld1q_v:
7448 case NEON::BI__builtin_neon_vld1q_lane_v:
7449 case NEON::BI__builtin_neon_vld1_lane_v:
7450 case NEON::BI__builtin_neon_vld1_dup_v:
7451 case NEON::BI__builtin_neon_vld1q_dup_v:
7452 case NEON::BI__builtin_neon_vst1_v:
7453 case NEON::BI__builtin_neon_vst1q_v:
7454 case NEON::BI__builtin_neon_vst1q_lane_v:
7455 case NEON::BI__builtin_neon_vst1_lane_v:
7456 case NEON::BI__builtin_neon_vst2_v:
7457 case NEON::BI__builtin_neon_vst2q_v:
7458 case NEON::BI__builtin_neon_vst2_lane_v:
7459 case NEON::BI__builtin_neon_vst2q_lane_v:
7460 case NEON::BI__builtin_neon_vst3_v:
7461 case NEON::BI__builtin_neon_vst3q_v:
7462 case NEON::BI__builtin_neon_vst3_lane_v:
7463 case NEON::BI__builtin_neon_vst3q_lane_v:
7464 case NEON::BI__builtin_neon_vst4_v:
7465 case NEON::BI__builtin_neon_vst4q_v:
7466 case NEON::BI__builtin_neon_vst4_lane_v:
7467 case NEON::BI__builtin_neon_vst4q_lane_v:
7468 // Get the alignment for the argument in addition to the value;
7469 // we'll use it later.
7470 PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
7471 Ops.push_back(PtrOp0.getPointer());
7472 continue;
7473 }
7474 }
7475 if (i == 1) {
7476 switch (BuiltinID) {
7477 case NEON::BI__builtin_neon_vld2_v:
7478 case NEON::BI__builtin_neon_vld2q_v:
7479 case NEON::BI__builtin_neon_vld3_v:
7480 case NEON::BI__builtin_neon_vld3q_v:
7481 case NEON::BI__builtin_neon_vld4_v:
7482 case NEON::BI__builtin_neon_vld4q_v:
7483 case NEON::BI__builtin_neon_vld2_lane_v:
7484 case NEON::BI__builtin_neon_vld2q_lane_v:
7485 case NEON::BI__builtin_neon_vld3_lane_v:
7486 case NEON::BI__builtin_neon_vld3q_lane_v:
7487 case NEON::BI__builtin_neon_vld4_lane_v:
7488 case NEON::BI__builtin_neon_vld4q_lane_v:
7489 case NEON::BI__builtin_neon_vld2_dup_v:
7490 case NEON::BI__builtin_neon_vld2q_dup_v:
7491 case NEON::BI__builtin_neon_vld3_dup_v:
7492 case NEON::BI__builtin_neon_vld3q_dup_v:
7493 case NEON::BI__builtin_neon_vld4_dup_v:
7494 case NEON::BI__builtin_neon_vld4q_dup_v:
7495 // Get the alignment for the argument in addition to the value;
7496 // we'll use it later.
7497 PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
7498 Ops.push_back(PtrOp1.getPointer());
7499 continue;
7500 }
7501 }
7502
7503 if ((ICEArguments & (1 << i)) == 0) {
7504 Ops.push_back(EmitScalarExpr(E->getArg(i)));
7505 } else {
7506 // If this is required to be a constant, constant fold it so that we know
7507 // that the generated intrinsic gets a ConstantInt.
7508 Ops.push_back(llvm::ConstantInt::get(
7509 getLLVMContext(),
7510 *E->getArg(i)->getIntegerConstantExpr(getContext())));
7511 }
7512 }
7513
7514 switch (BuiltinID) {
7515 default: break;
7516
7517 case NEON::BI__builtin_neon_vget_lane_i8:
7518 case NEON::BI__builtin_neon_vget_lane_i16:
7519 case NEON::BI__builtin_neon_vget_lane_i32:
7520 case NEON::BI__builtin_neon_vget_lane_i64:
7521 case NEON::BI__builtin_neon_vget_lane_bf16:
7522 case NEON::BI__builtin_neon_vget_lane_f32:
7523 case NEON::BI__builtin_neon_vgetq_lane_i8:
7524 case NEON::BI__builtin_neon_vgetq_lane_i16:
7525 case NEON::BI__builtin_neon_vgetq_lane_i32:
7526 case NEON::BI__builtin_neon_vgetq_lane_i64:
7527 case NEON::BI__builtin_neon_vgetq_lane_bf16:
7528 case NEON::BI__builtin_neon_vgetq_lane_f32:
7529 case NEON::BI__builtin_neon_vduph_lane_bf16:
7530 case NEON::BI__builtin_neon_vduph_laneq_bf16:
7531 return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
7532
7533 case NEON::BI__builtin_neon_vrndns_f32: {
7534 Value *Arg = EmitScalarExpr(E->getArg(0));
7535 llvm::Type *Tys[] = {Arg->getType()};
7536 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys);
7537    return Builder.CreateCall(F, {Arg}, "vrndn");
  }
7538
7539 case NEON::BI__builtin_neon_vset_lane_i8:
7540 case NEON::BI__builtin_neon_vset_lane_i16:
7541 case NEON::BI__builtin_neon_vset_lane_i32:
7542 case NEON::BI__builtin_neon_vset_lane_i64:
7543 case NEON::BI__builtin_neon_vset_lane_bf16:
7544 case NEON::BI__builtin_neon_vset_lane_f32:
7545 case NEON::BI__builtin_neon_vsetq_lane_i8:
7546 case NEON::BI__builtin_neon_vsetq_lane_i16:
7547 case NEON::BI__builtin_neon_vsetq_lane_i32:
7548 case NEON::BI__builtin_neon_vsetq_lane_i64:
7549 case NEON::BI__builtin_neon_vsetq_lane_bf16:
7550 case NEON::BI__builtin_neon_vsetq_lane_f32:
7551 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
7552
7553 case NEON::BI__builtin_neon_vsha1h_u32:
7554 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
7555 "vsha1h");
7556  case NEON::BI__builtin_neon_vsha1cq_u32:
7557    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
7558                        "vsha1c");
7559  case NEON::BI__builtin_neon_vsha1pq_u32:
7560    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
7561                        "vsha1p");
7562  case NEON::BI__builtin_neon_vsha1mq_u32:
7563    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
7564                        "vsha1m");
7565
7566 case NEON::BI__builtin_neon_vcvth_bf16_f32: {
7567 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vcvtbfp2bf), Ops,
7568 "vcvtbfp2bf");
7569 }
7570
7571 // The ARM _MoveToCoprocessor builtins put the input register value as
7572 // the first argument, but the LLVM intrinsic expects it as the third one.
7573 case ARM::BI_MoveToCoprocessor:
7574 case ARM::BI_MoveToCoprocessor2: {
7575 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ?
7576 Intrinsic::arm_mcr : Intrinsic::arm_mcr2);
7577 return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
7578 Ops[3], Ops[4], Ops[5]});
7579 }
7580 }
7581
7582 // Get the last argument, which specifies the vector type.
7583 assert(HasExtraArg);
7584 const Expr *Arg = E->getArg(E->getNumArgs()-1);
7585 Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext());
7586 if (!Result)
7587 return nullptr;
7588
7589 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
7590 BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
7591 // Determine the overloaded type of this builtin.
7592 llvm::Type *Ty;
7593 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
7594 Ty = FloatTy;
7595 else
7596 Ty = DoubleTy;
7597
7598 // Determine whether this is an unsigned conversion or not.
7599 bool usgn = Result->getZExtValue() == 1;
7600 unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
7601
7602 // Call the appropriate intrinsic.
7603 Function *F = CGM.getIntrinsic(Int, Ty);
7604 return Builder.CreateCall(F, Ops, "vcvtr");
7605 }
7606
7607 // Determine the type of this overloaded NEON intrinsic.
7608 NeonTypeFlags Type = Result->getZExtValue();
7609 bool usgn = Type.isUnsigned();
7610 bool rightShift = false;
7611
7612 llvm::FixedVectorType *VTy =
7613 GetNeonType(this, Type, getTarget().hasLegalHalfType(), false,
7614 getTarget().hasBFloat16Type());
7615 llvm::Type *Ty = VTy;
7616 if (!Ty)
7617 return nullptr;
7618
7619 // Many NEON builtins have identical semantics and uses in ARM and
7620 // AArch64. Emit these in a single function.
7621 auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap);
7622 const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
7623 IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
7624 if (Builtin)
7625 return EmitCommonNeonBuiltinExpr(
7626 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
7627 Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch);
7628
7629 unsigned Int;
7630 switch (BuiltinID) {
7631 default: return nullptr;
7632 case NEON::BI__builtin_neon_vld1q_lane_v:
7633 // Handle 64-bit integer elements as a special case. Use shuffles of
7634 // one-element vectors to avoid poor code for i64 in the backend.
7635 if (VTy->getElementType()->isIntegerTy(64)) {
7636 // Extract the other lane.
7637 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7638 int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
7639 Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
7640 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
7641 // Load the value as a one-element vector.
7642 Ty = llvm::FixedVectorType::get(VTy->getElementType(), 1);
7643 llvm::Type *Tys[] = {Ty, Int8PtrTy};
7644 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys);
7645 Value *Align = getAlignmentValue32(PtrOp0);
7646 Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
7647 // Combine them.
7648 int Indices[] = {1 - Lane, Lane};
7649 return Builder.CreateShuffleVector(Ops[1], Ld, Indices, "vld1q_lane");
7650 }
7651 LLVM_FALLTHROUGH;
7652 case NEON::BI__builtin_neon_vld1_lane_v: {
7653 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7654 PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType());
7655 Value *Ld = Builder.CreateLoad(PtrOp0);
7656 return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
7657 }
7658 case NEON::BI__builtin_neon_vqrshrn_n_v:
7659 Int =
7660 usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
7661 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
7662 1, true);
7663 case NEON::BI__builtin_neon_vqrshrun_n_v:
7664 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
7665 Ops, "vqrshrun_n", 1, true);
7666 case NEON::BI__builtin_neon_vqshrn_n_v:
7667 Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
7668 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
7669 1, true);
7670 case NEON::BI__builtin_neon_vqshrun_n_v:
7671 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
7672 Ops, "vqshrun_n", 1, true);
7673 case NEON::BI__builtin_neon_vrecpe_v:
7674 case NEON::BI__builtin_neon_vrecpeq_v:
7675 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
7676 Ops, "vrecpe");
7677 case NEON::BI__builtin_neon_vrshrn_n_v:
7678 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
7679 Ops, "vrshrn_n", 1, true);
7680 case NEON::BI__builtin_neon_vrsra_n_v:
7681 case NEON::BI__builtin_neon_vrsraq_n_v:
7682 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7683 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7684 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
7685 Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
7686 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]});
7687 return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
7688 case NEON::BI__builtin_neon_vsri_n_v:
7689 case NEON::BI__builtin_neon_vsriq_n_v:
7690 rightShift = true;
7691 LLVM_FALLTHROUGH;
7692 case NEON::BI__builtin_neon_vsli_n_v:
7693 case NEON::BI__builtin_neon_vsliq_n_v:
7694 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
7695 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
7696 Ops, "vsli_n");
7697 case NEON::BI__builtin_neon_vsra_n_v:
7698 case NEON::BI__builtin_neon_vsraq_n_v:
7699 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7700 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
7701 return Builder.CreateAdd(Ops[0], Ops[1]);
7702 case NEON::BI__builtin_neon_vst1q_lane_v:
7703 // Handle 64-bit integer elements as a special case. Use a shuffle to get
7704 // a one-element vector and avoid poor code for i64 in the backend.
7705 if (VTy->getElementType()->isIntegerTy(64)) {
7706 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7707 Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
7708 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
7709 Ops[2] = getAlignmentValue32(PtrOp0);
7710 llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()};
7711 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
7712 Tys), Ops);
7713 }
7714 LLVM_FALLTHROUGH;
7715 case NEON::BI__builtin_neon_vst1_lane_v: {
7716 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7717 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
7718 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
7719 auto St = Builder.CreateStore(Ops[1], Builder.CreateBitCast(PtrOp0, Ty));
7720 return St;
7721 }
7722 case NEON::BI__builtin_neon_vtbl1_v:
7723 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
7724 Ops, "vtbl1");
7725 case NEON::BI__builtin_neon_vtbl2_v:
7726 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
7727 Ops, "vtbl2");
7728 case NEON::BI__builtin_neon_vtbl3_v:
7729 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
7730 Ops, "vtbl3");
7731 case NEON::BI__builtin_neon_vtbl4_v:
7732 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
7733 Ops, "vtbl4");
7734 case NEON::BI__builtin_neon_vtbx1_v:
7735 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
7736 Ops, "vtbx1");
7737 case NEON::BI__builtin_neon_vtbx2_v:
7738 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
7739 Ops, "vtbx2");
7740 case NEON::BI__builtin_neon_vtbx3_v:
7741 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
7742 Ops, "vtbx3");
7743 case NEON::BI__builtin_neon_vtbx4_v:
7744 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
7745 Ops, "vtbx4");
7746 }
7747}
7748
7749template<typename Integer>
7750static Integer GetIntegerConstantValue(const Expr *E, ASTContext &Context) {
7751 return E->getIntegerConstantExpr(Context)->getExtValue();
7752}
7753
7754static llvm::Value *SignOrZeroExtend(CGBuilderTy &Builder, llvm::Value *V,
7755 llvm::Type *T, bool Unsigned) {
7756 // Helper function called by Tablegen-constructed ARM MVE builtin codegen,
7757 // which finds it convenient to specify signed/unsigned as a boolean flag.
7758 return Unsigned ? Builder.CreateZExt(V, T) : Builder.CreateSExt(V, T);
7759}
7760
7761static llvm::Value *MVEImmediateShr(CGBuilderTy &Builder, llvm::Value *V,
7762 uint32_t Shift, bool Unsigned) {
7763 // MVE helper function for integer shift right. This must handle signed vs
7764 // unsigned, and also deal specially with the case where the shift count is
7765 // equal to the lane size. In LLVM IR, an LShr with that parameter would be
7766 // undefined behavior, but in MVE it's legal, so we must convert it to code
7767 // that is not undefined in IR.
7768 unsigned LaneBits = cast<llvm::VectorType>(V->getType())
7769 ->getElementType()
7770 ->getPrimitiveSizeInBits();
7771 if (Shift == LaneBits) {
7772 // An unsigned shift of the full lane size always generates zero, so we can
7773 // simply emit a zero vector. A signed shift of the full lane size does the
7774 // same thing as shifting by one bit fewer.
7775 if (Unsigned)
7776 return llvm::Constant::getNullValue(V->getType());
7777 else
7778 --Shift;
7779 }
7780 return Unsigned ? Builder.CreateLShr(V, Shift) : Builder.CreateAShr(V, Shift);
7781}
7782
7783static llvm::Value *ARMMVEVectorSplat(CGBuilderTy &Builder, llvm::Value *V) {
7784 // MVE-specific helper function for a vector splat, which infers the element
7785 // count of the output vector by knowing that MVE vectors are all 128 bits
7786 // wide.
7787 unsigned Elements = 128 / V->getType()->getPrimitiveSizeInBits();
7788 return Builder.CreateVectorSplat(Elements, V);
7789}
7790
7791static llvm::Value *ARMMVEVectorReinterpret(CGBuilderTy &Builder,
7792 CodeGenFunction *CGF,
7793 llvm::Value *V,
7794 llvm::Type *DestType) {
7795 // Convert one MVE vector type into another by reinterpreting its in-register
7796 // format.
7797 //
7798  // On little-endian targets this is identical to a bitcast (which
7799  // reinterprets the memory format). On big-endian targets the two are not
7800  // necessarily the same, because the register and memory formats map to
7801  // each other differently depending on the lane size.
7802 //
7803 // We generate a bitcast whenever we can (if we're little-endian, or if the
7804 // lane sizes are the same anyway). Otherwise we fall back to an IR intrinsic
7805 // that performs the different kind of reinterpretation.
7806 if (CGF->getTarget().isBigEndian() &&
7807 V->getType()->getScalarSizeInBits() != DestType->getScalarSizeInBits()) {
7808 return Builder.CreateCall(
7809 CGF->CGM.getIntrinsic(Intrinsic::arm_mve_vreinterpretq,
7810 {DestType, V->getType()}),
7811 V);
7812 } else {
7813 return Builder.CreateBitCast(V, DestType);
7814 }
7815}
7816
7817static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd) {
7818 // Make a shufflevector that extracts every other element of a vector (evens
7819 // or odds, as desired).
7820 SmallVector<int, 16> Indices;
7821 unsigned InputElements =
7822 cast<llvm::FixedVectorType>(V->getType())->getNumElements();
7823 for (unsigned i = 0; i < InputElements; i += 2)
7824 Indices.push_back(i + Odd);
7825 return Builder.CreateShuffleVector(V, Indices);
7826}
7827
7828static llvm::Value *VectorZip(CGBuilderTy &Builder, llvm::Value *V0,
7829 llvm::Value *V1) {
7830 // Make a shufflevector that interleaves two vectors element by element.
7831 assert(V0->getType() == V1->getType() && "Can't zip different vector types");
7832 SmallVector<int, 16> Indices;
7833 unsigned InputElements =
7834 cast<llvm::FixedVectorType>(V0->getType())->getNumElements();
7835 for (unsigned i = 0; i < InputElements; i++) {
7836 Indices.push_back(i);
7837 Indices.push_back(i + InputElements);
7838 }
7839 return Builder.CreateShuffleVector(V0, V1, Indices);
7840}
7841
7842template<unsigned HighBit, unsigned OtherBits>
7843static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) {
7844 // MVE-specific helper function to make a vector splat of a constant such as
7845 // UINT_MAX or INT_MIN, in which all bits below the highest one are equal.
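  // For instance, <1,0> yields INT_MIN in every lane, <0,1> yields
  // INT_MAX, and <1,1> yields UINT_MAX (all bits set).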
7846 llvm::Type *T = cast<llvm::VectorType>(VT)->getElementType();
7847 unsigned LaneBits = T->getPrimitiveSizeInBits();
7848 uint32_t Value = HighBit << (LaneBits - 1);
7849 if (OtherBits)
7850 Value |= (1UL << (LaneBits - 1)) - 1;
7851 llvm::Value *Lane = llvm::ConstantInt::get(T, Value);
7852 return ARMMVEVectorSplat(Builder, Lane);
7853}
7854
7855static llvm::Value *ARMMVEVectorElementReverse(CGBuilderTy &Builder,
7856 llvm::Value *V,
7857 unsigned ReverseWidth) {
7858 // MVE-specific helper function which reverses the elements of a
7859 // vector within every (ReverseWidth)-bit collection of lanes.
7860 SmallVector<int, 16> Indices;
7861 unsigned LaneSize = V->getType()->getScalarSizeInBits();
7862 unsigned Elements = 128 / LaneSize;
7863 unsigned Mask = ReverseWidth / LaneSize - 1;
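  // XOR-ing each lane index with Mask reverses the lanes within every
  // group, e.g. with 32-bit lanes and ReverseWidth == 64, Mask == 1 swaps
  // each adjacent pair of lanes.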
7864 for (unsigned i = 0; i < Elements; i++)
7865 Indices.push_back(i ^ Mask);
7866 return Builder.CreateShuffleVector(V, Indices);
7867}
7868
7869Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
7870 const CallExpr *E,
7871 ReturnValueSlot ReturnValue,
7872 llvm::Triple::ArchType Arch) {
7873 enum class CustomCodeGen { VLD24, VST24 } CustomCodeGenType;
7874 Intrinsic::ID IRIntr;
7875 unsigned NumVectors;
7876
7877 // Code autogenerated by Tablegen will handle all the simple builtins.
7878 switch (BuiltinID) {
7879 #include "clang/Basic/arm_mve_builtin_cg.inc"
7880
7881 // If we didn't match an MVE builtin id at all, go back to the
7882 // main EmitARMBuiltinExpr.
7883 default:
7884 return nullptr;
7885 }
7886
7887 // Anything that breaks from that switch is an MVE builtin that
7888 // needs handwritten code to generate.
7889
7890 switch (CustomCodeGenType) {
7891
7892 case CustomCodeGen::VLD24: {
7893 llvm::SmallVector<Value *, 4> Ops;
7894 llvm::SmallVector<llvm::Type *, 4> Tys;
7895
7896 auto MvecCType = E->getType();
7897 auto MvecLType = ConvertType(MvecCType);
7898 assert(MvecLType->isStructTy() &&
7899 "Return type for vld[24]q should be a struct");
7900 assert(MvecLType->getStructNumElements() == 1 &&
7901 "Return-type struct for vld[24]q should have one element");
7902 auto MvecLTypeInner = MvecLType->getStructElementType(0);
7903 assert(MvecLTypeInner->isArrayTy() &&
7904 "Return-type struct for vld[24]q should contain an array");
7905 assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
7906 "Array member of return-type struct vld[24]q has wrong length");
7907 auto VecLType = MvecLTypeInner->getArrayElementType();
7908
7909 Tys.push_back(VecLType);
7910
7911 auto Addr = E->getArg(0);
7912 Ops.push_back(EmitScalarExpr(Addr));
7913 Tys.push_back(ConvertType(Addr->getType()));
7914
7915 Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys));
7916 Value *LoadResult = Builder.CreateCall(F, Ops);
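    // The IR intrinsic returns a flat struct of NumVectors vectors;
    // rebuild the Clang-level return struct, which wraps them in a
    // one-element array.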
7917 Value *MvecOut = UndefValue::get(MvecLType);
7918 for (unsigned i = 0; i < NumVectors; ++i) {
7919 Value *Vec = Builder.CreateExtractValue(LoadResult, i);
7920 MvecOut = Builder.CreateInsertValue(MvecOut, Vec, {0, i});
7921 }
7922
7923 if (ReturnValue.isNull())
7924 return MvecOut;
7925 else
7926 return Builder.CreateStore(MvecOut, ReturnValue.getValue());
7927 }
7928
7929 case CustomCodeGen::VST24: {
7930 llvm::SmallVector<Value *, 4> Ops;
7931 llvm::SmallVector<llvm::Type *, 4> Tys;
7932
7933 auto Addr = E->getArg(0);
7934 Ops.push_back(EmitScalarExpr(Addr));
7935 Tys.push_back(ConvertType(Addr->getType()));
7936
7937 auto MvecCType = E->getArg(1)->getType();
7938 auto MvecLType = ConvertType(MvecCType);
7939 assert(MvecLType->isStructTy() && "Data type for vst2q should be a struct");
7940 assert(MvecLType->getStructNumElements() == 1 &&
7941 "Data-type struct for vst2q should have one element");
7942 auto MvecLTypeInner = MvecLType->getStructElementType(0);
7943 assert(MvecLTypeInner->isArrayTy() &&
7944 "Data-type struct for vst2q should contain an array");
7945    assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
7946           "Array member of data-type struct for vst2q has wrong length");
7947 auto VecLType = MvecLTypeInner->getArrayElementType();
7948
7949 Tys.push_back(VecLType);
7950
7951 AggValueSlot MvecSlot = CreateAggTemp(MvecCType);
7952 EmitAggExpr(E->getArg(1), MvecSlot);
7953 auto Mvec = Builder.CreateLoad(MvecSlot.getAddress());
7954 for (unsigned i = 0; i < NumVectors; i++)
7955 Ops.push_back(Builder.CreateExtractValue(Mvec, {0, i}));
7956
7957 Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys));
7958 Value *ToReturn = nullptr;
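    // vst[24]q lowers to NumVectors calls to the intrinsic, one per
    // vector, passing the vector index as the trailing operand.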
7959 for (unsigned i = 0; i < NumVectors; i++) {
7960 Ops.push_back(llvm::ConstantInt::get(Int32Ty, i));
7961 ToReturn = Builder.CreateCall(F, Ops);
7962 Ops.pop_back();
7963 }
7964 return ToReturn;
7965 }
7966 }
7967 llvm_unreachable("unknown custom codegen type.");
7968}
7969
7970Value *CodeGenFunction::EmitARMCDEBuiltinExpr(unsigned BuiltinID,
7971 const CallExpr *E,
7972 ReturnValueSlot ReturnValue,
7973 llvm::Triple::ArchType Arch) {
7974 switch (BuiltinID) {
7975 default:
7976 return nullptr;
7977#include "clang/Basic/arm_cde_builtin_cg.inc"
7978 }
7979}
7980
static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
                                        const CallExpr *E,
                                        SmallVectorImpl<Value *> &Ops,
                                        llvm::Triple::ArchType Arch) {
  unsigned int Int = 0;
  const char *s = nullptr;

  switch (BuiltinID) {
  default:
    return nullptr;
  case NEON::BI__builtin_neon_vtbl1_v:
  case NEON::BI__builtin_neon_vqtbl1_v:
  case NEON::BI__builtin_neon_vqtbl1q_v:
  case NEON::BI__builtin_neon_vtbl2_v:
  case NEON::BI__builtin_neon_vqtbl2_v:
  case NEON::BI__builtin_neon_vqtbl2q_v:
  case NEON::BI__builtin_neon_vtbl3_v:
  case NEON::BI__builtin_neon_vqtbl3_v:
  case NEON::BI__builtin_neon_vqtbl3q_v:
  case NEON::BI__builtin_neon_vtbl4_v:
  case NEON::BI__builtin_neon_vqtbl4_v:
  case NEON::BI__builtin_neon_vqtbl4q_v:
    break;
  case NEON::BI__builtin_neon_vtbx1_v:
  case NEON::BI__builtin_neon_vqtbx1_v:
  case NEON::BI__builtin_neon_vqtbx1q_v:
  case NEON::BI__builtin_neon_vtbx2_v:
  case NEON::BI__builtin_neon_vqtbx2_v:
  case NEON::BI__builtin_neon_vqtbx2q_v:
  case NEON::BI__builtin_neon_vtbx3_v:
  case NEON::BI__builtin_neon_vqtbx3_v:
  case NEON::BI__builtin_neon_vqtbx3q_v:
  case NEON::BI__builtin_neon_vtbx4_v:
  case NEON::BI__builtin_neon_vqtbx4_v:
  case NEON::BI__builtin_neon_vqtbx4q_v:
    break;
  }

  assert(E->getNumArgs() >= 3);

  // Get the last argument, which specifies the vector type.
  const Expr *Arg = E->getArg(E->getNumArgs() - 1);
  Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(CGF.getContext());
  if (!Result)
    return nullptr;

  // Determine the type of this overloaded NEON intrinsic.
  NeonTypeFlags Type = Result->getZExtValue();
  llvm::FixedVectorType *Ty = GetNeonType(&CGF, Type);
  if (!Ty)
    return nullptr;

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  // AArch64 scalar builtins are not overloaded: they do not have an extra
  // argument that specifies the vector type, so each case must be handled
  // individually.
  switch (BuiltinID) {
  case NEON::BI__builtin_neon_vtbl1_v: {
    return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 1), nullptr,
                              Ops[1], Ty, Intrinsic::aarch64_neon_tbl1,
                              "vtbl1");
  }
  case NEON::BI__builtin_neon_vtbl2_v: {
    return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 2), nullptr,
                              Ops[2], Ty, Intrinsic::aarch64_neon_tbl1,
                              "vtbl1");
  }
  case NEON::BI__builtin_neon_vtbl3_v: {
    return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 3), nullptr,
                              Ops[3], Ty, Intrinsic::aarch64_neon_tbl2,
                              "vtbl2");
  }
  case NEON::BI__builtin_neon_vtbl4_v: {
    return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 4), nullptr,
                              Ops[4], Ty, Intrinsic::aarch64_neon_tbl2,
                              "vtbl2");
  }
  case NEON::BI__builtin_neon_vtbx1_v: {
    Value *TblRes =
        packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 1), nullptr, Ops[2],
                           Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");

    // vtbx1 has no direct AArch64 equivalent: select between the TBL result
    // and the original destination, depending on whether each index was in
    // range for a one-register table (< 8).
    llvm::Constant *EightV = ConstantInt::get(Ty, 8);
    Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
    CmpRes = Builder.CreateSExt(CmpRes, Ty);

    Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
    Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
    return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
  }
  case NEON::BI__builtin_neon_vtbx2_v: {
    return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 2), Ops[0],
                              Ops[3], Ty, Intrinsic::aarch64_neon_tbx1,
                              "vtbx1");
  }
  case NEON::BI__builtin_neon_vtbx3_v: {
    Value *TblRes =
        packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 3), nullptr, Ops[4],
                           Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");

    // As for vtbx1, but the three-register table packs into two 128-bit
    // registers, so indices below 24 are in range.
    llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24);
    Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
                                       TwentyFourV);
    CmpRes = Builder.CreateSExt(CmpRes, Ty);

    Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
    Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
    return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
  }
  case NEON::BI__builtin_neon_vtbx4_v: {
    return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 4), Ops[0],
                              Ops[5], Ty, Intrinsic::aarch64_neon_tbx2,
                              "vtbx2");
  }
  case NEON::BI__builtin_neon_vqtbl1_v:
  case NEON::BI__builtin_neon_vqtbl1q_v:
    Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
  case NEON::BI__builtin_neon_vqtbl2_v:
  case NEON::BI__builtin_neon_vqtbl2q_v:
    Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
  case NEON::BI__builtin_neon_vqtbl3_v:
  case NEON::BI__builtin_neon_vqtbl3q_v:
    Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
  case NEON::BI__builtin_neon_vqtbl4_v:
  case NEON::BI__builtin_neon_vqtbl4q_v:
    Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
  case NEON::BI__builtin_neon_vqtbx1_v:
  case NEON::BI__builtin_neon_vqtbx1q_v:
    Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
  case NEON::BI__builtin_neon_vqtbx2_v:
  case NEON::BI__builtin_neon_vqtbx2q_v:
    Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
  case NEON::BI__builtin_neon_vqtbx3_v:
  case NEON::BI__builtin_neon_vqtbx3q_v:
    Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
  case NEON::BI__builtin_neon_vqtbx4_v:
  case NEON::BI__builtin_neon_vqtbx4q_v:
    Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
  }

  if (!Int)
    return nullptr;

  Function *F = CGF.CGM.getIntrinsic(Int, Ty);
  return CGF.EmitNeonCall(F, Ops, s);
}

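// Widen a scalar i16 into lane 0 of a <4 x i16> vector, so that it can be
// fed to NEON intrinsics that only accept vector operands; the remaining
// lanes are left undefined.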
Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
  auto *VTy = llvm::FixedVectorType::get(Int16Ty, 4);
  Op = Builder.CreateBitCast(Op, Int16Ty);
  Value *V = UndefValue::get(VTy);
  llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
  Op = Builder.CreateInsertElement(V, Op, CI);
  return Op;
}

/// SVEBuiltinMemEltTy - Returns the memory element type for this memory
/// access builtin. Only required if it can't be inferred from the base
/// pointer operand.
llvm::Type *CodeGenFunction::SVEBuiltinMemEltTy(SVETypeFlags TypeFlags) {
  switch (TypeFlags.getMemEltType()) {
  case SVETypeFlags::MemEltTyDefault:
    return getEltType(TypeFlags);
  case SVETypeFlags::MemEltTyInt8:
    return Builder.getInt8Ty();
  case SVETypeFlags::MemEltTyInt16:
    return Builder.getInt16Ty();
  case SVETypeFlags::MemEltTyInt32:
    return Builder.getInt32Ty();
  case SVETypeFlags::MemEltTyInt64:
    return Builder.getInt64Ty();
  }
  llvm_unreachable("Unknown MemEltType");
}

llvm::Type *CodeGenFunction::getEltType(SVETypeFlags TypeFlags) {
  switch (TypeFlags.getEltType()) {
  default:
    llvm_unreachable("Invalid SVETypeFlag!");

  case SVETypeFlags::EltTyInt8:
    return Builder.getInt8Ty();
  case SVETypeFlags::EltTyInt16:
    return Builder.getInt16Ty();
  case SVETypeFlags::EltTyInt32:
    return Builder.getInt32Ty();
  case SVETypeFlags::EltTyInt64:
    return Builder.getInt64Ty();

  case SVETypeFlags::EltTyFloat16:
    return Builder.getHalfTy();
  case SVETypeFlags::EltTyFloat32:
    return Builder.getFloatTy();
  case SVETypeFlags::EltTyFloat64:
    return Builder.getDoubleTy();

  case SVETypeFlags::EltTyBFloat16:
    return Builder.getBFloatTy();

  case SVETypeFlags::EltTyBool8:
  case SVETypeFlags::EltTyBool16:
  case SVETypeFlags::EltTyBool32:
  case SVETypeFlags::EltTyBool64:
    return Builder.getInt1Ty();
  }
}

// Return the llvm predicate vector type corresponding to the specified
// element TypeFlags.
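// For example, EltTyFloat64 maps to <vscale x 2 x i1>, matching the two f64
// elements held in each 128-bit block of an SVE vector.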
llvm::ScalableVectorType *
CodeGenFunction::getSVEPredType(SVETypeFlags TypeFlags) {
  switch (TypeFlags.getEltType()) {
  default: llvm_unreachable("Unhandled SVETypeFlag!");

  case SVETypeFlags::EltTyInt8:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
  case SVETypeFlags::EltTyInt16:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
  case SVETypeFlags::EltTyInt32:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
  case SVETypeFlags::EltTyInt64:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);

  case SVETypeFlags::EltTyBFloat16:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
  case SVETypeFlags::EltTyFloat16:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
  case SVETypeFlags::EltTyFloat32:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
  case SVETypeFlags::EltTyFloat64:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);

  case SVETypeFlags::EltTyBool8:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
  case SVETypeFlags::EltTyBool16:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
  case SVETypeFlags::EltTyBool32:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
  case SVETypeFlags::EltTyBool64:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
  }
}

// Return the llvm vector type corresponding to the specified element
// TypeFlags.
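// For example, EltTyInt32 maps to <vscale x 4 x i32>: four 32-bit elements
// per 128-bit block.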
llvm::ScalableVectorType *
CodeGenFunction::getSVEType(const SVETypeFlags &TypeFlags) {
  switch (TypeFlags.getEltType()) {
  default:
    llvm_unreachable("Invalid SVETypeFlag!");

  case SVETypeFlags::EltTyInt8:
    return llvm::ScalableVectorType::get(Builder.getInt8Ty(), 16);
  case SVETypeFlags::EltTyInt16:
    return llvm::ScalableVectorType::get(Builder.getInt16Ty(), 8);
  case SVETypeFlags::EltTyInt32:
    return llvm::ScalableVectorType::get(Builder.getInt32Ty(), 4);
  case SVETypeFlags::EltTyInt64:
    return llvm::ScalableVectorType::get(Builder.getInt64Ty(), 2);

  case SVETypeFlags::EltTyFloat16:
    return llvm::ScalableVectorType::get(Builder.getHalfTy(), 8);
  case SVETypeFlags::EltTyBFloat16:
    return llvm::ScalableVectorType::get(Builder.getBFloatTy(), 8);
  case SVETypeFlags::EltTyFloat32:
    return llvm::ScalableVectorType::get(Builder.getFloatTy(), 4);
  case SVETypeFlags::EltTyFloat64:
    return llvm::ScalableVectorType::get(Builder.getDoubleTy(), 2);

  case SVETypeFlags::EltTyBool8:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
  case SVETypeFlags::EltTyBool16:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
  case SVETypeFlags::EltTyBool32:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
  case SVETypeFlags::EltTyBool64:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
  }
}

llvm::Value *CodeGenFunction::EmitSVEAllTruePred(SVETypeFlags TypeFlags) {
  Function *Ptrue =
      CGM.getIntrinsic(Intrinsic::aarch64_sve_ptrue, getSVEPredType(TypeFlags));
  return Builder.CreateCall(Ptrue, {Builder.getInt32(/*SV_ALL*/ 31)});
}

constexpr unsigned SVEBitsPerBlock = 128;

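// Return the SVE vector type for EltTy with as many elements as fit in one
// 128-bit block, e.g. i16 -> <vscale x 8 x i16> (128 / 16 == 8).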
static llvm::ScalableVectorType *getSVEVectorForElementType(llvm::Type *EltTy) {
  unsigned NumElts = SVEBitsPerBlock / EltTy->getScalarSizeInBits();
  return llvm::ScalableVectorType::get(EltTy, NumElts);
}

// Reinterpret the input predicate so that it can be used to correctly isolate
// the elements of the specified datatype.
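// For example, a full <vscale x 16 x i1> svbool_t is narrowed to
// <vscale x 2 x i1> via aarch64.sve.convert.from.svbool before it can
// predicate an operation on <vscale x 2 x i64> data.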
Value *CodeGenFunction::EmitSVEPredicateCast(Value *Pred,
                                             llvm::ScalableVectorType *VTy) {
  auto *RTy = llvm::VectorType::get(IntegerType::get(getLLVMContext(), 1), VTy);
  if (Pred->getType() == RTy)
    return Pred;

  unsigned IntID;
  llvm::Type *IntrinsicTy;
  switch (VTy->getMinNumElements()) {
  default:
    llvm_unreachable("unsupported element count!");
  case 2:
  case 4:
  case 8:
    IntID = Intrinsic::aarch64_sve_convert_from_svbool;
    IntrinsicTy = RTy;
    break;
  case 16:
    IntID = Intrinsic::aarch64_sve_convert_to_svbool;
    IntrinsicTy = Pred->getType();
    break;
  }

  Function *F = CGM.getIntrinsic(IntID, IntrinsicTy);
  Value *C = Builder.CreateCall(F, Pred);
  assert(C->getType() == RTy && "Unexpected return type!");
  return C;
}

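// Emit an SVE gather load. The ACLE gathers come in two addressing modes,
// "vector base, scalar offset" and "scalar base, vector offset"; e.g.,
// illustratively, svld1_gather_u64base_s64(pg, bases) supplies a vector of
// base addresses with an implicit zero offset.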
Value *CodeGenFunction::EmitSVEGatherLoad(SVETypeFlags TypeFlags,
                                          SmallVectorImpl<Value *> &Ops,
                                          unsigned IntID) {
  auto *ResultTy = getSVEType(TypeFlags);
  auto *OverloadedTy =
      llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), ResultTy);

  // At the ACLE level there's only one predicate type, svbool_t, which is
  // mapped to <vscale x 16 x i1>. However, this might be incompatible with
  // the actual type being loaded. For example, when loading doubles (f64)
  // the predicate should be <vscale x 2 x i1> instead. At the IR level the
  // type of the predicate and the data being loaded must match. Cast
  // accordingly.
  Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);

  Function *F = nullptr;
  if (Ops[1]->getType()->isVectorTy())
    // This is the "vector base, scalar offset" case. In order to uniquely
    // map this built-in to an LLVM IR intrinsic, we need both the return type
    // and the type of the vector base.
    F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[1]->getType()});
  else
    // This is the "scalar base, vector offset" case. The type of the offset
    // is encoded in the name of the intrinsic. We only need to specify the
    // return type in order to uniquely map this built-in to an LLVM IR
    // intrinsic.
    F = CGM.getIntrinsic(IntID, OverloadedTy);

  // Pass 0 when the offset is missing. This can only be applied when using
  // the "vector base" addressing mode for which ACLE allows no offset. The
  // corresponding LLVM IR always requires an offset.
  if (Ops.size() == 2) {
    assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
    Ops.push_back(ConstantInt::get(Int64Ty, 0));
  }

  // For "vector base, scalar index" scale the index so that it becomes a
  // scalar offset.
  if (!TypeFlags.isByteIndexed() && Ops[1]->getType()->isVectorTy()) {
    unsigned BytesPerElt =
        OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
    Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
    Ops[2] = Builder.CreateMul(Ops[2], Scale);
  }

  Value *Call = Builder.CreateCall(F, Ops);

  // The following sext/zext is only needed when ResultTy != OverloadedTy. In
  // other cases it's folded into a nop.
  return TypeFlags.isZExtReturn() ? Builder.CreateZExt(Call, ResultTy)
                                  : Builder.CreateSExt(Call, ResultTy);
}

Value *CodeGenFunction::EmitSVEScatterStore(SVETypeFlags TypeFlags,
                                            SmallVectorImpl<Value *> &Ops,
                                            unsigned IntID) {
  auto *SrcDataTy = getSVEType(TypeFlags);
  auto *OverloadedTy =
      llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), SrcDataTy);

  // In ACLE the source data is passed in the last argument, whereas in LLVM IR
  // it's the first argument. Move it accordingly.
  Ops.insert(Ops.begin(), Ops.pop_back_val());

  Function *F = nullptr;
  if (Ops[2]->getType()->isVectorTy())
    // This is the "vector base, scalar offset" case. In order to uniquely
    // map this built-in to an LLVM IR intrinsic, we need both the return type
    // and the type of the vector base.
    F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[2]->getType()});
  else
    // This is the "scalar base, vector offset" case. The type of the offset
    // is encoded in the name of the intrinsic. We only need to specify the
    // return type in order to uniquely map this built-in to an LLVM IR
    // intrinsic.
    F = CGM.getIntrinsic(IntID, OverloadedTy);

  // Pass 0 when the offset is missing. This can only be applied when using
  // the "vector base" addressing mode for which ACLE allows no offset. The
  // corresponding LLVM IR always requires an offset.
  if (Ops.size() == 3) {
    assert(Ops[2]->getType()->isVectorTy() && "Scalar base requires an offset");
    Ops.push_back(ConstantInt::get(Int64Ty, 0));
  }

  // Truncation is needed when SrcDataTy != OverloadedTy. In other cases it's
  // folded into a nop.
  Ops[0] = Builder.CreateTrunc(Ops[0], OverloadedTy);

  // At the ACLE level there's only one predicate type, svbool_t, which is
  // mapped to <vscale x 16 x i1>. However, this might be incompatible with
  // the actual type being stored. For example, when storing doubles (f64)
  // the predicate should be <vscale x 2 x i1> instead. At the IR level the
  // type of the predicate and the data being stored must match. Cast
  // accordingly.
  Ops[1] = EmitSVEPredicateCast(Ops[1], OverloadedTy);

  // For "vector base, scalar index" scale the index so that it becomes a
  // scalar offset.
  if (!TypeFlags.isByteIndexed() && Ops[2]->getType()->isVectorTy()) {
    unsigned BytesPerElt =
        OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
    Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
    Ops[3] = Builder.CreateMul(Ops[3], Scale);
  }

  return Builder.CreateCall(F, Ops);
}

Value *CodeGenFunction::EmitSVEGatherPrefetch(SVETypeFlags TypeFlags,
                                              SmallVectorImpl<Value *> &Ops,
                                              unsigned IntID) {
  // The gather prefetches are overloaded on the vector input - this can either
  // be the vector of base addresses or the vector of offsets.
  auto *OverloadedTy = dyn_cast<llvm::ScalableVectorType>(Ops[1]->getType());
  if (!OverloadedTy)
    OverloadedTy = cast<llvm::ScalableVectorType>(Ops[2]->getType());

  // Cast the predicate from svbool_t to the right number of elements.
  Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);

  // vector + imm addressing modes
  if (Ops[1]->getType()->isVectorTy()) {
    if (Ops.size() == 3) {
      // Pass 0 for 'vector+imm' when the index is omitted.
      Ops.push_back(ConstantInt::get(Int64Ty, 0));

      // The sv_prfop is the last operand in the builtin and IR intrinsic.
      std::swap(Ops[2], Ops[3]);
    } else {
      // The index needs to be passed as a scaled offset.
      llvm::Type *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
      unsigned BytesPerElt = MemEltTy->getPrimitiveSizeInBits() / 8;
      Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
      Ops[2] = Builder.CreateMul(Ops[2], Scale);
    }
  }

  Function *F = CGM.getIntrinsic(IntID, OverloadedTy);
  return Builder.CreateCall(F, Ops);
}

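// Emit an SVE structure load (ld2/ld3/ld4). The IR intrinsic returns one
// wide tuple vector, e.g. an ld2 of <vscale x 4 x i32> data yields a single
// <vscale x 8 x i32> result holding both parts.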
Value *CodeGenFunction::EmitSVEStructLoad(SVETypeFlags TypeFlags,
                                          SmallVectorImpl<Value*> &Ops,
                                          unsigned IntID) {
  llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
  auto VecPtrTy = llvm::PointerType::getUnqual(VTy);
  auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType());

  unsigned N;
  switch (IntID) {
  case Intrinsic::aarch64_sve_ld2:
    N = 2;
    break;
  case Intrinsic::aarch64_sve_ld3:
    N = 3;
    break;
  case Intrinsic::aarch64_sve_ld4:
    N = 4;
    break;
  default:
    llvm_unreachable("unknown intrinsic!");
  }
  auto RetTy = llvm::VectorType::get(VTy->getElementType(),
                                     VTy->getElementCount() * N);

  Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
  Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy);
  Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
  BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
  BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy);

  Function *F = CGM.getIntrinsic(IntID, {RetTy, Predicate->getType()});
  return Builder.CreateCall(F, {Predicate, BasePtr});
}

Value *CodeGenFunction::EmitSVEStructStore(SVETypeFlags TypeFlags,
                                           SmallVectorImpl<Value*> &Ops,
                                           unsigned IntID) {
  llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
  auto VecPtrTy = llvm::PointerType::getUnqual(VTy);
  auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType());

  unsigned N;
  switch (IntID) {
  case Intrinsic::aarch64_sve_st2:
    N = 2;
    break;
  case Intrinsic::aarch64_sve_st3:
    N = 3;
    break;
  case Intrinsic::aarch64_sve_st4:
    N = 4;
    break;
  default:
    llvm_unreachable("unknown intrinsic!");
  }
  auto TupleTy =
      llvm::VectorType::get(VTy->getElementType(), VTy->getElementCount() * N);

  Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
  Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy);
  Value *Offset = Ops.size() > 3 ? Ops[2] : Builder.getInt32(0);
  Value *Val = Ops.back();
  BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
  BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy);

  // The llvm.aarch64.sve.st2/3/4 intrinsics take legal part vectors, so we
  // need to break up the tuple vector.
  SmallVector<llvm::Value*, 5> Operands;
  Function *FExtr =
      CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
  for (unsigned I = 0; I < N; ++I)
    Operands.push_back(Builder.CreateCall(FExtr, {Val, Builder.getInt32(I)}));
  Operands.append({Predicate, BasePtr});

  Function *F = CGM.getIntrinsic(IntID, {VTy});
  return Builder.CreateCall(F, Operands);
}

// SVE2's svpmullb and svpmullt builtins are similar to the svpmullb_pair and
// svpmullt_pair intrinsics, with the exception that their results are bitcast
// to a wider type.
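// For example (illustratively), svpmullb_u64 performs the pairwise multiply
// on its <vscale x 4 x i32> operands and the result is then reinterpreted
// as <vscale x 2 x i64>.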
Value *CodeGenFunction::EmitSVEPMull(SVETypeFlags TypeFlags,
                                     SmallVectorImpl<Value *> &Ops,
                                     unsigned BuiltinID) {
  // Splat scalar operand to vector (intrinsics with _n infix)
  if (TypeFlags.hasSplatOperand()) {
    unsigned OpNo = TypeFlags.getSplatOperand();
    Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
  }

  // The pair-wise function has a narrower overloaded type.
  Function *F = CGM.getIntrinsic(BuiltinID, Ops[0]->getType());
  Value *Call = Builder.CreateCall(F, {Ops[0], Ops[1]});

  // Now bitcast to the wider result type.
  llvm::ScalableVectorType *Ty = getSVEType(TypeFlags);
  return EmitSVEReinterpret(Call, Ty);
}

Value *CodeGenFunction::EmitSVEMovl(SVETypeFlags TypeFlags,
                                    ArrayRef<Value *> Ops, unsigned BuiltinID) {
  llvm::Type *OverloadedTy = getSVEType(TypeFlags);
  Function *F = CGM.getIntrinsic(BuiltinID, OverloadedTy);
  return Builder.CreateCall(F, {Ops[0], Builder.getInt32(0)});
}

Value *CodeGenFunction::EmitSVEPrefetchLoad(SVETypeFlags TypeFlags,
                                            SmallVectorImpl<Value *> &Ops,
                                            unsigned BuiltinID) {
  auto *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
  auto *VectorTy = getSVEVectorForElementType(MemEltTy);
  auto *MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);

  Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
  Value *BasePtr = Ops[1];

  // Apply the index operand, if it was not omitted.
  if (Ops.size() > 3) {
    BasePtr = Builder.CreateBitCast(BasePtr, MemoryTy->getPointerTo());
    BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);
  }

  // Prefetch intrinsics always expect an i8*.
  BasePtr = Builder.CreateBitCast(BasePtr, llvm::PointerType::getUnqual(Int8Ty));
  Value *PrfOp = Ops.back();

  Function *F = CGM.getIntrinsic(BuiltinID, Predicate->getType());
  return Builder.CreateCall(F, {Predicate, BasePtr, PrfOp});
}

Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E,
                                          llvm::Type *ReturnTy,
                                          SmallVectorImpl<Value *> &Ops,
                                          unsigned BuiltinID,
                                          bool IsZExtReturn) {
  QualType LangPTy = E->getArg(1)->getType();
  llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
      LangPTy->getAs<PointerType>()->getPointeeType());

  // The vector type that is returned may be different from the
  // eventual type loaded from memory.
  auto VectorTy = cast<llvm::ScalableVectorType>(ReturnTy);
  auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);

  Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
  Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
  Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
  BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset);

  BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
  Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
  Value *Load = Builder.CreateCall(F, {Predicate, BasePtr});

  return IsZExtReturn ? Builder.CreateZExt(Load, VectorTy)
                      : Builder.CreateSExt(Load, VectorTy);
}

Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
                                           SmallVectorImpl<Value *> &Ops,
                                           unsigned BuiltinID) {
  QualType LangPTy = E->getArg(1)->getType();
  llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
      LangPTy->getAs<PointerType>()->getPointeeType());

  // The vector type that is stored may be different from the
  // eventual type stored to memory.
  auto VectorTy = cast<llvm::ScalableVectorType>(Ops.back()->getType());
  auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);

  Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
  Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
  Value *Offset = Ops.size() == 4 ? Ops[2] : Builder.getInt32(0);
  BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset);

  // Last value is always the data
  llvm::Value *Val = Builder.CreateTrunc(Ops.back(), MemoryTy);

  BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
  Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
  return Builder.CreateCall(F, {Val, Predicate, BasePtr});
}

// Limit the amount of scalable LLVM IR generated for ACLE builtins by using
// the SVE dup.x intrinsic instead of IRBuilder::CreateVectorSplat.
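// For example, splatting an i32 scalar emits aarch64.sve.dup.x returning
// <vscale x 4 x i32>, instead of an insertelement/shufflevector sequence.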
Value *CodeGenFunction::EmitSVEDupX(Value *Scalar, llvm::Type *Ty) {
  auto F = CGM.getIntrinsic(Intrinsic::aarch64_sve_dup_x, Ty);
  return Builder.CreateCall(F, Scalar);
}

Value *CodeGenFunction::EmitSVEDupX(Value *Scalar) {
  return EmitSVEDupX(Scalar, getSVEVectorForElementType(Scalar->getType()));
}

Value *CodeGenFunction::EmitSVEReinterpret(Value *Val, llvm::Type *Ty) {
  // FIXME: For big endian this needs an additional REV, or needs a separate
  // intrinsic that is code-generated as a no-op, because the LLVM bitcast
  // instruction is defined as 'bitwise' equivalent from memory point of
  // view (when storing/reloading), whereas the svreinterpret builtin
  // implements bitwise equivalent cast from register point of view.
  // LLVM CodeGen for a bitcast must add an explicit REV for big-endian.
  return Builder.CreateBitCast(Val, Ty);
}

static void InsertExplicitZeroOperand(CGBuilderTy &Builder, llvm::Type *Ty,
                                      SmallVectorImpl<Value *> &Ops) {
  auto *SplatZero = Constant::getNullValue(Ty);
  Ops.insert(Ops.begin(), SplatZero);
}

static void InsertExplicitUndefOperand(CGBuilderTy &Builder, llvm::Type *Ty,
                                       SmallVectorImpl<Value *> &Ops) {
  auto *SplatUndef = UndefValue::get(Ty);
  Ops.insert(Ops.begin(), SplatUndef);
}

SmallVector<llvm::Type *, 2> CodeGenFunction::getSVEOverloadTypes(
    SVETypeFlags TypeFlags, llvm::Type *ResultType, ArrayRef<Value *> Ops) {
  if (TypeFlags.isOverloadNone())
    return {};

  llvm::Type *DefaultType = getSVEType(TypeFlags);

  if (TypeFlags.isOverloadWhile())
    return {DefaultType, Ops[1]->getType()};

  if (TypeFlags.isOverloadWhileRW())
    return {getSVEPredType(TypeFlags), Ops[0]->getType()};

  if (TypeFlags.isOverloadCvt() || TypeFlags.isTupleSet())
    return {Ops[0]->getType(), Ops.back()->getType()};

  if (TypeFlags.isTupleCreate() || TypeFlags.isTupleGet())
    return {ResultType, Ops[0]->getType()};

  assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads");
  return {DefaultType};
}

Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
                                                  const CallExpr *E) {
  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  assert(Error == ASTContext::GE_None && "Should not codegen an error");

  llvm::Type *Ty = ConvertType(E->getType());
  if (BuiltinID >= SVE::BI__builtin_sve_reinterpret_s8_s8 &&
      BuiltinID <= SVE::BI__builtin_sve_reinterpret_f64_f64) {
    Value *Val = EmitScalarExpr(E->getArg(0));
    return EmitSVEReinterpret(Val, Ty);
  }

  llvm::SmallVector<Value *, 4> Ops;
  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
    if ((ICEArguments & (1 << i)) == 0)
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
    else {
      // If this is required to be a constant, constant fold it so that we know
      // that the generated intrinsic gets a ConstantInt.
      Optional<llvm::APSInt> Result =
          E->getArg(i)->getIntegerConstantExpr(getContext());
      assert(Result && "Expected argument to be a constant");

      // Immediates for SVE llvm intrinsics are always 32 bits. We can safely
      // truncate, because the immediate has been range-checked and no valid
      // immediate requires more than a handful of bits.
      *Result = Result->extOrTrunc(32);
      Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), *Result));
    }
  }

  auto *Builtin = findARMVectorIntrinsicInMap(AArch64SVEIntrinsicMap, BuiltinID,
                                              AArch64SVEIntrinsicsProvenSorted);
  SVETypeFlags TypeFlags(Builtin->TypeModifier);
  if (TypeFlags.isLoad())
    return EmitSVEMaskedLoad(E, Ty, Ops, Builtin->LLVMIntrinsic,
                             TypeFlags.isZExtReturn());
  else if (TypeFlags.isStore())
    return EmitSVEMaskedStore(E, Ops, Builtin->LLVMIntrinsic);
  else if (TypeFlags.isGatherLoad())
    return EmitSVEGatherLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
  else if (TypeFlags.isScatterStore())
    return EmitSVEScatterStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
  else if (TypeFlags.isPrefetch())
    return EmitSVEPrefetchLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
  else if (TypeFlags.isGatherPrefetch())
    return EmitSVEGatherPrefetch(TypeFlags, Ops, Builtin->LLVMIntrinsic);
  else if (TypeFlags.isStructLoad())
    return EmitSVEStructLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
  else if (TypeFlags.isStructStore())
    return EmitSVEStructStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
  else if (TypeFlags.isUndef())
    return UndefValue::get(Ty);
  else if (Builtin->LLVMIntrinsic != 0) {
    if (TypeFlags.getMergeType() == SVETypeFlags::MergeZeroExp)
      InsertExplicitZeroOperand(Builder, Ty, Ops);

    if (TypeFlags.getMergeType() == SVETypeFlags::MergeAnyExp)
      InsertExplicitUndefOperand(Builder, Ty, Ops);

    // Some ACLE builtins leave out the argument to specify the predicate
    // pattern, which is expected to be expanded to an SV_ALL pattern.
    if (TypeFlags.isAppendSVALL())
      Ops.push_back(Builder.getInt32(/*SV_ALL*/ 31));
    if (TypeFlags.isInsertOp1SVALL())
      Ops.insert(&Ops[1], Builder.getInt32(/*SV_ALL*/ 31));

    // Predicates must match the main datatype.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType()))
        if (PredTy->getElementType()->isIntegerTy(1))
          Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags));

    // Splat scalar operand to vector (intrinsics with _n infix)
    if (TypeFlags.hasSplatOperand()) {
      unsigned OpNo = TypeFlags.getSplatOperand();
      Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
    }

    if (TypeFlags.isReverseCompare())
      std::swap(Ops[1], Ops[2]);

    if (TypeFlags.isReverseUSDOT())
      std::swap(Ops[1], Ops[2]);

    // Predicated intrinsics with _z suffix need a select w/ zeroinitializer.
    if (TypeFlags.getMergeType() == SVETypeFlags::MergeZero) {
      llvm::Type *OpndTy = Ops[1]->getType();
      auto *SplatZero = Constant::getNullValue(OpndTy);
      Function *Sel = CGM.getIntrinsic(Intrinsic::aarch64_sve_sel, OpndTy);
      Ops[1] = Builder.CreateCall(Sel, {Ops[0], Ops[1], SplatZero});
    }

    Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic,
                                   getSVEOverloadTypes(TypeFlags, Ty, Ops));
    Value *Call = Builder.CreateCall(F, Ops);

    // Predicate results must be converted to svbool_t.
    if (auto PredTy = dyn_cast<llvm::VectorType>(Call->getType()))
      if (PredTy->getScalarType()->isIntegerTy(1))
        Call = EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));

    return Call;
  }

  switch (BuiltinID) {
  default:
    return nullptr;

  case SVE::BI__builtin_sve_svmov_b_z: {
    // svmov_b_z(pg, op) <=> svand_b_z(pg, op, op)
    SVETypeFlags TypeFlags(Builtin->TypeModifier);
    llvm::Type *OverloadedTy = getSVEType(TypeFlags);
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_and_z, OverloadedTy);
    return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[1]});
  }

  case SVE::BI__builtin_sve_svnot_b_z: {
    // svnot_b_z(pg, op) <=> sveor_b_z(pg, op, pg)
    SVETypeFlags TypeFlags(Builtin->TypeModifier);
    llvm::Type *OverloadedTy = getSVEType(TypeFlags);
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_eor_z, OverloadedTy);
    return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[0]});
  }

  case SVE::BI__builtin_sve_svmovlb_u16:
  case SVE::BI__builtin_sve_svmovlb_u32:
  case SVE::BI__builtin_sve_svmovlb_u64:
    return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllb);

  case SVE::BI__builtin_sve_svmovlb_s16:
  case SVE::BI__builtin_sve_svmovlb_s32:
  case SVE::BI__builtin_sve_svmovlb_s64:
    return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllb);

  case SVE::BI__builtin_sve_svmovlt_u16:
  case SVE::BI__builtin_sve_svmovlt_u32:
  case SVE::BI__builtin_sve_svmovlt_u64:
    return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllt);

  case SVE::BI__builtin_sve_svmovlt_s16:
  case SVE::BI__builtin_sve_svmovlt_s32:
  case SVE::BI__builtin_sve_svmovlt_s64:
    return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllt);

  case SVE::BI__builtin_sve_svpmullt_u16:
  case SVE::BI__builtin_sve_svpmullt_u64:
  case SVE::BI__builtin_sve_svpmullt_n_u16:
  case SVE::BI__builtin_sve_svpmullt_n_u64:
    return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullt_pair);

  case SVE::BI__builtin_sve_svpmullb_u16:
  case SVE::BI__builtin_sve_svpmullb_u64:
  case SVE::BI__builtin_sve_svpmullb_n_u16:
  case SVE::BI__builtin_sve_svpmullb_n_u64:
    return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullb_pair);

  case SVE::BI__builtin_sve_svdup_n_b8:
  case SVE::BI__builtin_sve_svdup_n_b16:
  case SVE::BI__builtin_sve_svdup_n_b32:
  case SVE::BI__builtin_sve_svdup_n_b64: {
    Value *CmpNE =
        Builder.CreateICmpNE(Ops[0], Constant::getNullValue(Ops[0]->getType()));
    llvm::ScalableVectorType *OverloadedTy = getSVEType(TypeFlags);
    Value *Dup = EmitSVEDupX(CmpNE, OverloadedTy);
    return EmitSVEPredicateCast(Dup, cast<llvm::ScalableVectorType>(Ty));
  }

  case SVE::BI__builtin_sve_svdupq_n_b8:
  case SVE::BI__builtin_sve_svdupq_n_b16:
  case SVE::BI__builtin_sve_svdupq_n_b32:
  case SVE::BI__builtin_sve_svdupq_n_b64:
  case SVE::BI__builtin_sve_svdupq_n_u8:
  case SVE::BI__builtin_sve_svdupq_n_s8:
  case SVE::BI__builtin_sve_svdupq_n_u64:
  case SVE::BI__builtin_sve_svdupq_n_f64:
  case SVE::BI__builtin_sve_svdupq_n_s64:
  case SVE::BI__builtin_sve_svdupq_n_u16:
  case SVE::BI__builtin_sve_svdupq_n_f16:
  case SVE::BI__builtin_sve_svdupq_n_bf16:
  case SVE::BI__builtin_sve_svdupq_n_s16:
  case SVE::BI__builtin_sve_svdupq_n_u32:
  case SVE::BI__builtin_sve_svdupq_n_f32:
  case SVE::BI__builtin_sve_svdupq_n_s32: {
    // These builtins are implemented by storing each element to an array and
    // using ld1rq to materialize a vector.
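    // For example (illustratively), svdupq_n_s32(a, b, c, d) stores the four
    // values to a 16-byte-aligned alloca and ld1rq then replicates that
    // 128-bit quadword across the whole scalable vector.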
    unsigned NumOpnds = Ops.size();

    bool IsBoolTy =
        cast<llvm::VectorType>(Ty)->getElementType()->isIntegerTy(1);

    // For svdupq_n_b* the element type is an integer of width 128/numelts,
    // so that the compare can use a width that is natural for the expected
    // number of predicate lanes.
    llvm::Type *EltTy = Ops[0]->getType();
    if (IsBoolTy)
      EltTy = IntegerType::get(getLLVMContext(), SVEBitsPerBlock / NumOpnds);

    Address Alloca = CreateTempAlloca(llvm::ArrayType::get(EltTy, NumOpnds),
                                      CharUnits::fromQuantity(16));
    for (unsigned I = 0; I < NumOpnds; ++I)
      Builder.CreateDefaultAlignedStore(
          IsBoolTy ? Builder.CreateZExt(Ops[I], EltTy) : Ops[I],
          Builder.CreateGEP(Alloca.getPointer(),
                            {Builder.getInt64(0), Builder.getInt64(I)}));

    SVETypeFlags TypeFlags(Builtin->TypeModifier);
    Value *Pred = EmitSVEAllTruePred(TypeFlags);

    llvm::Type *OverloadedTy = getSVEVectorForElementType(EltTy);
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_ld1rq, OverloadedTy);
    Value *Alloca0 = Builder.CreateGEP(
        Alloca.getPointer(), {Builder.getInt64(0), Builder.getInt64(0)});
    Value *LD1RQ = Builder.CreateCall(F, {Pred, Alloca0});

    if (!IsBoolTy)
      return LD1RQ;

    // For svdupq_n_b* we need to add an additional 'cmpne' with '0'.
    F = CGM.getIntrinsic(NumOpnds == 2 ? Intrinsic::aarch64_sve_cmpne
                                       : Intrinsic::aarch64_sve_cmpne_wide,
                         OverloadedTy);
    Value *Call =
        Builder.CreateCall(F, {Pred, LD1RQ, EmitSVEDupX(Builder.getInt64(0))});
    return EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
  }

  case SVE::BI__builtin_sve_svpfalse_b:
    return ConstantInt::getFalse(Ty);

  case SVE::BI__builtin_sve_svlen_bf16:
  case SVE::BI__builtin_sve_svlen_f16:
  case SVE::BI__builtin_sve_svlen_f32:
  case SVE::BI__builtin_sve_svlen_f64:
  case SVE::BI__builtin_sve_svlen_s8:
  case SVE::BI__builtin_sve_svlen_s16:
  case SVE::BI__builtin_sve_svlen_s32:
  case SVE::BI__builtin_sve_svlen_s64:
  case SVE::BI__builtin_sve_svlen_u8:
  case SVE::BI__builtin_sve_svlen_u16:
  case SVE::BI__builtin_sve_svlen_u32:
  case SVE::BI__builtin_sve_svlen_u64: {
    SVETypeFlags TF(Builtin->TypeModifier);
    auto VTy = cast<llvm::VectorType>(getSVEType(TF));
    auto *NumEls =
        llvm::ConstantInt::get(Ty, VTy->getElementCount().getKnownMinValue());

    Function *F = CGM.getIntrinsic(Intrinsic::vscale, Ty);
    return Builder.CreateMul(NumEls, Builder.CreateCall(F));
  }

  case SVE::BI__builtin_sve_svtbl2_u8:
  case SVE::BI__builtin_sve_svtbl2_s8:
  case SVE::BI__builtin_sve_svtbl2_u16:
  case SVE::BI__builtin_sve_svtbl2_s16:
  case SVE::BI__builtin_sve_svtbl2_u32:
  case SVE::BI__builtin_sve_svtbl2_s32:
  case SVE::BI__builtin_sve_svtbl2_u64:
  case SVE::BI__builtin_sve_svtbl2_s64:
  case SVE::BI__builtin_sve_svtbl2_f16:
  case SVE::BI__builtin_sve_svtbl2_bf16:
  case SVE::BI__builtin_sve_svtbl2_f32:
  case SVE::BI__builtin_sve_svtbl2_f64: {
    SVETypeFlags TF(Builtin->TypeModifier);
    auto VTy = cast<llvm::VectorType>(getSVEType(TF));
    auto TupleTy = llvm::VectorType::getDoubleElementsVectorType(VTy);
    Function *FExtr =
        CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
    Value *V0 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(0)});
    Value *V1 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(1)});
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_tbl2, VTy);
    return Builder.CreateCall(F, {V0, V1, Ops[1]});
  }
  }

  // Should not happen: every case above returns.
  return nullptr;
}

Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
                                               const CallExpr *E,
                                               llvm::Triple::ArchType Arch) {
  if (BuiltinID >= AArch64::FirstSVEBuiltin &&
      BuiltinID <= AArch64::LastSVEBuiltin)
    return EmitAArch64SVEBuiltinExpr(BuiltinID, E);

  unsigned HintID = static_cast<unsigned>(-1);
  switch (BuiltinID) {
  default: break;
  case AArch64::BI__builtin_arm_nop:
    HintID = 0;
    break;
  case AArch64::BI__builtin_arm_yield:
  case AArch64::BI__yield:
    HintID = 1;
    break;
  case AArch64::BI__builtin_arm_wfe:
  case AArch64::BI__wfe:
    HintID = 2;
    break;
  case AArch64::BI__builtin_arm_wfi:
  case AArch64::BI__wfi:
    HintID = 3;
    break;
  case AArch64::BI__builtin_arm_sev:
  case AArch64::BI__sev:
    HintID = 4;
    break;
  case AArch64::BI__builtin_arm_sevl:
  case AArch64::BI__sevl:
    HintID = 5;
    break;
  }

  if (HintID != static_cast<unsigned>(-1)) {
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
    return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
  }

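  // __builtin_arm_prefetch is lowered onto the generic llvm.prefetch
  // intrinsic; for temporal fetches the cache level is converted to
  // llvm.prefetch's locality argument as (3 - level), and streaming fetches
  // use locality 0.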
  if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *RW = EmitScalarExpr(E->getArg(1));
    Value *CacheLevel = EmitScalarExpr(E->getArg(2));
    Value *RetentionPolicy = EmitScalarExpr(E->getArg(3));
    Value *IsData = EmitScalarExpr(E->getArg(4));

    Value *Locality = nullptr;
    if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) {
      // Temporal fetch: convert the cache level to an LLVM locality value.
      Locality = llvm::ConstantInt::get(Int32Ty,
        -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3);
    } else {
      // Streaming fetch.
      Locality = llvm::ConstantInt::get(Int32Ty, 0);
    }

    // FIXME: We need AArch64 specific LLVM intrinsic if we want to specify
    // PLDL3STRM or PLDL2STRM.
    Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
    return Builder.CreateCall(F, {Address, RW, Locality, IsData});
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rbit) {
    assert((getContext().getTypeSize(E->getType()) == 32) &&
           "rbit of unusual size!");
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
  }
  if (BuiltinID == AArch64::BI__builtin_arm_rbit64) {
    assert((getContext().getTypeSize(E->getType()) == 64) &&
           "rbit of unusual size!");
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
  }

  if (BuiltinID == AArch64::BI__builtin_arm_cls) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls), Arg,
                              "cls");
  }
  if (BuiltinID == AArch64::BI__builtin_arm_cls64) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls64), Arg,
                              "cls");
  }

  if (BuiltinID == AArch64::BI__builtin_arm_jcvt) {
    assert((getContext().getTypeSize(E->getType()) == 32) &&
           "__jcvt of unusual size!");
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_ld64b ||
      BuiltinID == AArch64::BI__builtin_arm_st64b ||
      BuiltinID == AArch64::BI__builtin_arm_st64bv ||
      BuiltinID == AArch64::BI__builtin_arm_st64bv0) {
    llvm::Value *MemAddr = EmitScalarExpr(E->getArg(0));
    llvm::Value *ValPtr = EmitScalarExpr(E->getArg(1));

    if (BuiltinID == AArch64::BI__builtin_arm_ld64b) {
      // Load from the address via an LLVM intrinsic, receiving a
      // tuple of 8 i64 words, and store each one to ValPtr.
      Function *F = CGM.getIntrinsic(Intrinsic::aarch64_ld64b);
      llvm::Value *Val = Builder.CreateCall(F, MemAddr);
      llvm::Value *ToRet;
      for (size_t i = 0; i < 8; i++) {
        llvm::Value *ValOffsetPtr =
            Builder.CreateGEP(ValPtr, Builder.getInt32(i));
        Address Addr(ValOffsetPtr, CharUnits::fromQuantity(8));
        ToRet = Builder.CreateStore(Builder.CreateExtractValue(Val, i), Addr);
      }
      return ToRet;
    } else {
      // Load 8 i64 words from ValPtr, and store them to the address
      // via an LLVM intrinsic.
      SmallVector<llvm::Value *, 9> Args;
      Args.push_back(MemAddr);
      for (size_t i = 0; i < 8; i++) {
        llvm::Value *ValOffsetPtr =
            Builder.CreateGEP(ValPtr, Builder.getInt32(i));
        Address Addr(ValOffsetPtr, CharUnits::fromQuantity(8));
        Args.push_back(Builder.CreateLoad(Addr));
      }

      auto Intr = (BuiltinID == AArch64::BI__builtin_arm_st64b
                   ? Intrinsic::aarch64_st64b
                   : BuiltinID == AArch64::BI__builtin_arm_st64bv
                   ? Intrinsic::aarch64_st64bv
                   : Intrinsic::aarch64_st64bv0);
      Function *F = CGM.getIntrinsic(Intr);
      return Builder.CreateCall(F, Args);
    }
  }

  if (BuiltinID == AArch64::BI__clear_cache) {
    assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
    const FunctionDecl *FD = E->getDirectCallee();
    Value *Ops[2];
    for (unsigned i = 0; i < 2; i++)
      Ops[i] = EmitScalarExpr(E->getArg(i));
    llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
    llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
    StringRef Name = FD->getName();
    return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
  }

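  // 128-bit __builtin_arm_ldrex/ldaex lower to aarch64.ldxp/ldaxp, which
  // return the two halves as separate i64 values; they are recombined into
  // a single i128 below.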
  if ((BuiltinID == AArch64::BI__builtin_arm_ldrex ||
       BuiltinID == AArch64::BI__builtin_arm_ldaex) &&
      getContext().getTypeSize(E->getType()) == 128) {
    Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
                                       ? Intrinsic::aarch64_ldaxp
                                       : Intrinsic::aarch64_ldxp);

    Value *LdPtr = EmitScalarExpr(E->getArg(0));
    Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
                                    "ldxp");

    Value *Val0 = Builder.CreateExtractValue(Val, 1);
    Value *Val1 = Builder.CreateExtractValue(Val, 0);
    llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
    Val0 = Builder.CreateZExt(Val0, Int128Ty);
    Val1 = Builder.CreateZExt(Val1, Int128Ty);

    Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
    Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
    Val = Builder.CreateOr(Val, Val1);
    return Builder.CreateBitCast(Val, ConvertType(E->getType()));
  } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
             BuiltinID == AArch64::BI__builtin_arm_ldaex) {
    Value *LoadAddr = EmitScalarExpr(E->getArg(0));

    QualType Ty = E->getType();
    llvm::Type *RealResTy = ConvertType(Ty);
    llvm::Type *PtrTy = llvm::IntegerType::get(
        getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
    LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);

    Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
                                       ? Intrinsic::aarch64_ldaxr
                                       : Intrinsic::aarch64_ldxr,
                                   PtrTy);
    Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");

    if (RealResTy->isPointerTy())
      return Builder.CreateIntToPtr(Val, RealResTy);

    llvm::Type *IntResTy = llvm::IntegerType::get(
        getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
    Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
    return Builder.CreateBitCast(Val, RealResTy);
  }

  if ((BuiltinID == AArch64::BI__builtin_arm_strex ||
       BuiltinID == AArch64::BI__builtin_arm_stlex) &&
      getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
    Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
                                       ? Intrinsic::aarch64_stlxp
                                       : Intrinsic::aarch64_stxp);
    llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty);

    Address Tmp = CreateMemTemp(E->getArg(0)->getType());
    EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);

    Tmp = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
    llvm::Value *Val = Builder.CreateLoad(Tmp);

    Value *Arg0 = Builder.CreateExtractValue(Val, 0);
    Value *Arg1 = Builder.CreateExtractValue(Val, 1);
    Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)),
                                         Int8PtrTy);
    return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
  }

  if (BuiltinID == AArch64::BI__builtin_arm_strex ||
      BuiltinID == AArch64::BI__builtin_arm_stlex) {
    Value *StoreVal = EmitScalarExpr(E->getArg(0));
    Value *StoreAddr = EmitScalarExpr(E->getArg(1));

    QualType Ty = E->getArg(0)->getType();
    llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
                                                 getContext().getTypeSize(Ty));
    StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());

    if (StoreVal->getType()->isPointerTy())
      StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
    else {
      llvm::Type *IntTy = llvm::IntegerType::get(
          getLLVMContext(),
          CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
      StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
      StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
    }

    Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
                                       ? Intrinsic::aarch64_stlxr
                                       : Intrinsic::aarch64_stxr,
                                   StoreAddr->getType());
    return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
  }

  if (BuiltinID == AArch64::BI__getReg) {
    Expr::EvalResult Result;
    if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
      llvm_unreachable("Sema will ensure that the parameter is constant");

    llvm::APSInt Value = Result.Val.getInt();
    LLVMContext &Context = CGM.getLLVMContext();
    std::string Reg = Value == 31 ? "sp" : "x" + Value.toString(10);

    llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)};
    llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
    llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);

    llvm::Function *F =
        CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
    return Builder.CreateCall(F, Metadata);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
    return Builder.CreateCall(F);
  }

  if (BuiltinID == AArch64::BI_ReadWriteBarrier)
    return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
                               llvm::SyncScope::SingleThread);

  // CRC32
  Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
  switch (BuiltinID) {
  case AArch64::BI__builtin_arm_crc32b:
    CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
  case AArch64::BI__builtin_arm_crc32cb:
    CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
  case AArch64::BI__builtin_arm_crc32h:
    CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
  case AArch64::BI__builtin_arm_crc32ch:
    CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
  case AArch64::BI__builtin_arm_crc32w:
    CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
  case AArch64::BI__builtin_arm_crc32cw:
    CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
  case AArch64::BI__builtin_arm_crc32d:
    CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
  case AArch64::BI__builtin_arm_crc32cd:
    CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
  }

  if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    Value *Arg1 = EmitScalarExpr(E->getArg(1));
    Function *F = CGM.getIntrinsic(CRCIntrinsicID);

    llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
    Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);

    return Builder.CreateCall(F, {Arg0, Arg1});
  }

  // Memory Tagging Extensions (MTE) Intrinsics
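  // For example, __builtin_arm_irg(ptr, mask) lowers to aarch64.irg on an
  // (i8*, i64) pair, with the result cast back to the original pointer type.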
9268 Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic;
9269 switch (BuiltinID) {
9270 case AArch64::BI__builtin_arm_irg:
9271 MTEIntrinsicID = Intrinsic::aarch64_irg; break;
9272 case AArch64::BI__builtin_arm_addg:
9273 MTEIntrinsicID = Intrinsic::aarch64_addg; break;
9274 case AArch64::BI__builtin_arm_gmi:
9275 MTEIntrinsicID = Intrinsic::aarch64_gmi; break;
9276 case AArch64::BI__builtin_arm_ldg:
9277 MTEIntrinsicID = Intrinsic::aarch64_ldg; break;
9278 case AArch64::BI__builtin_arm_stg:
9279 MTEIntrinsicID = Intrinsic::aarch64_stg; break;
9280 case AArch64::BI__builtin_arm_subp:
9281 MTEIntrinsicID = Intrinsic::aarch64_subp; break;
9282 }
9283
9284 if (MTEIntrinsicID != Intrinsic::not_intrinsic) {
9285 llvm::Type *T = ConvertType(E->getType());
9286
9287 if (MTEIntrinsicID == Intrinsic::aarch64_irg) {
9288 Value *Pointer = EmitScalarExpr(E->getArg(0));
9289 Value *Mask = EmitScalarExpr(E->getArg(1));
9290
9291 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
9292 Mask = Builder.CreateZExt(Mask, Int64Ty);
9293 Value *RV = Builder.CreateCall(
9294 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, Mask});
9295 return Builder.CreatePointerCast(RV, T);
9296 }
9297 if (MTEIntrinsicID == Intrinsic::aarch64_addg) {
9298 Value *Pointer = EmitScalarExpr(E->getArg(0));
9299 Value *TagOffset = EmitScalarExpr(E->getArg(1));
9300
9301 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
9302 TagOffset = Builder.CreateZExt(TagOffset, Int64Ty);
9303 Value *RV = Builder.CreateCall(
9304 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, TagOffset});
9305 return Builder.CreatePointerCast(RV, T);
9306 }
9307 if (MTEIntrinsicID == Intrinsic::aarch64_gmi) {
9308 Value *Pointer = EmitScalarExpr(E->getArg(0));
9309 Value *ExcludedMask = EmitScalarExpr(E->getArg(1));
9310
9311 ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty);
9312 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
9313 return Builder.CreateCall(
9314 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, ExcludedMask});
9315 }
9316 // Although it is possible to supply a different return
9317 // address (first arg) to this intrinsic, for now we set
9318 // return address same as input address.
9319 if (MTEIntrinsicID == Intrinsic::aarch64_ldg) {
9320 Value *TagAddress = EmitScalarExpr(E->getArg(0));
9321 TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
9322 Value *RV = Builder.CreateCall(
9323 CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
9324 return Builder.CreatePointerCast(RV, T);
9325 }
9326 // Although it is possible to supply a different tag (to set)
9327 // to this intrinsic (as first arg), for now we supply
9328 // the tag that is in input address arg (common use case).
9329 if (MTEIntrinsicID == Intrinsic::aarch64_stg) {
9330 Value *TagAddress = EmitScalarExpr(E->getArg(0));
9331 TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
9332 return Builder.CreateCall(
9333 CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
9334 }
9335 if (MTEIntrinsicID == Intrinsic::aarch64_subp) {
9336 Value *PointerA = EmitScalarExpr(E->getArg(0));
9337 Value *PointerB = EmitScalarExpr(E->getArg(1));
9338 PointerA = Builder.CreatePointerCast(PointerA, Int8PtrTy);
9339 PointerB = Builder.CreatePointerCast(PointerB, Int8PtrTy);
9340 return Builder.CreateCall(
9341 CGM.getIntrinsic(MTEIntrinsicID), {PointerA, PointerB});
9342 }
9343 }
9344
9345 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
9346 BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
9347 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
9348 BuiltinID == AArch64::BI__builtin_arm_wsr ||
9349 BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
9350 BuiltinID == AArch64::BI__builtin_arm_wsrp) {
9351
9352 SpecialRegisterAccessKind AccessKind = Write;
9353 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
9354 BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
9355 BuiltinID == AArch64::BI__builtin_arm_rsrp)
9356 AccessKind = VolatileRead;
9357
9358 bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp ||
9359 BuiltinID == AArch64::BI__builtin_arm_wsrp;
9360
9361 bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr &&
9362 BuiltinID != AArch64::BI__builtin_arm_wsr;
9363
9364 llvm::Type *ValueType;
9365 llvm::Type *RegisterType = Int64Ty;
9366 if (IsPointerBuiltin) {
9367 ValueType = VoidPtrTy;
9368 } else if (Is64Bit) {
9369 ValueType = Int64Ty;
9370 } else {
9371 ValueType = Int32Ty;
9372 }
9373
9374 return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
9375 AccessKind);
9376 }
9377
9378 if (BuiltinID == AArch64::BI_ReadStatusReg ||
9379 BuiltinID == AArch64::BI_WriteStatusReg) {
9380 LLVMContext &Context = CGM.getLLVMContext();
9381
9382 unsigned SysReg =
9383 E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue();
9384
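    // Decode the packed MSVC system-register encoding into the
    // "op0:op1:CRn:CRm:op2" string form understood by the backend. Bit 14
    // selects op0 = 2 or 3; the remaining fields are op1, CRn, CRm and op2.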
9385 std::string SysRegStr;
9386 llvm::raw_string_ostream(SysRegStr) <<
9387 ((1 << 1) | ((SysReg >> 14) & 1)) << ":" <<
9388 ((SysReg >> 11) & 7) << ":" <<
9389 ((SysReg >> 7) & 15) << ":" <<
9390 ((SysReg >> 3) & 15) << ":" <<
9391 ( SysReg & 7);
9392
9393 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) };
9394 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
9395 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
9396
9397 llvm::Type *RegisterType = Int64Ty;
9398 llvm::Type *Types[] = { RegisterType };
9399
9400 if (BuiltinID == AArch64::BI_ReadStatusReg) {
9401 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
9402
9403 return Builder.CreateCall(F, Metadata);
9404 }
9405
9406 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
9407 llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1));
9408
9409 return Builder.CreateCall(F, { Metadata, ArgValue });
9410 }
9411
9412 if (BuiltinID == AArch64::BI_AddressOfReturnAddress) {
9413 llvm::Function *F =
9414 CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
9415 return Builder.CreateCall(F);
9416 }
9417
9418 if (BuiltinID == AArch64::BI__builtin_sponentry) {
9419 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy);
9420 return Builder.CreateCall(F);
9421 }
9422
9423 // Handle MSVC intrinsics before argument evaluation to prevent double
9424 // evaluation.
9425 if (Optional<MSVCIntrin> MsvcIntId = translateAarch64ToMsvcIntrin(BuiltinID))
9426 return EmitMSVCBuiltinExpr(*MsvcIntId, E);
9427
9428 // Find out if any arguments are required to be integer constant
9429 // expressions.
9430 unsigned ICEArguments = 0;
9431 ASTContext::GetBuiltinTypeError Error;
9432 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
9433 assert(Error == ASTContext::GE_None && "Should not codegen an error");
9434
9435 llvm::SmallVector<Value*, 4> Ops;
9436 Address PtrOp0 = Address::invalid();
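  // Evaluate every argument except the last one; for overloaded NEON
  // builtins the final argument encodes the vector type and is consumed
  // below rather than emitted as a value here.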
9437 for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
9438 if (i == 0) {
9439 switch (BuiltinID) {
9440 case NEON::BI__builtin_neon_vld1_v:
9441 case NEON::BI__builtin_neon_vld1q_v:
9442 case NEON::BI__builtin_neon_vld1_dup_v:
9443 case NEON::BI__builtin_neon_vld1q_dup_v:
9444 case NEON::BI__builtin_neon_vld1_lane_v:
9445 case NEON::BI__builtin_neon_vld1q_lane_v:
9446 case NEON::BI__builtin_neon_vst1_v:
9447 case NEON::BI__builtin_neon_vst1q_v:
9448 case NEON::BI__builtin_neon_vst1_lane_v:
9449 case NEON::BI__builtin_neon_vst1q_lane_v:
9450 // Get the alignment for the argument in addition to the value;
9451 // we'll use it later.
9452 PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
9453 Ops.push_back(PtrOp0.getPointer());
9454 continue;
9455 }
9456 }
9457 if ((ICEArguments & (1 << i)) == 0) {
9458 Ops.push_back(EmitScalarExpr(E->getArg(i)));
9459 } else {
9460 // If this is required to be a constant, constant fold it so that we know
9461 // that the generated intrinsic gets a ConstantInt.
9462 Ops.push_back(llvm::ConstantInt::get(
9463 getLLVMContext(),
9464 *E->getArg(i)->getIntegerConstantExpr(getContext())));
9465 }
9466 }
9467
9468 auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
9469 const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
9470 SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
9471
9472 if (Builtin) {
9473 Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
9474 Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
9475 assert(Result && "SISD intrinsic should have been handled");
9476 return Result;
9477 }
9478
9479 const Expr *Arg = E->getArg(E->getNumArgs()-1);
9480 NeonTypeFlags Type(0);
9481 if (Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext()))
9482 // Determine the type of this overloaded NEON intrinsic.
9483 Type = NeonTypeFlags(Result->getZExtValue());
9484
9485 bool usgn = Type.isUnsigned();
9486 bool quad = Type.isQuad();
9487
9488 // Handle non-overloaded intrinsics first.
9489 switch (BuiltinID) {
9490 default: break;
9491 case NEON::BI__builtin_neon_vabsh_f16:
9492 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9493 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs");
9494 case NEON::BI__builtin_neon_vldrq_p128: {
9495 llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
9496 llvm::Type *Int128PTy = llvm::PointerType::get(Int128Ty, 0);
9497 Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
9498 return Builder.CreateAlignedLoad(Int128Ty, Ptr,
9499 CharUnits::fromQuantity(16));
9500 }
9501 case NEON::BI__builtin_neon_vstrq_p128: {
9502 llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
9503 Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
9504 return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr);
9505 }
9506 case NEON::BI__builtin_neon_vcvts_f32_u32:
9507 case NEON::BI__builtin_neon_vcvtd_f64_u64:
9508 usgn = true;
9509 LLVM_FALLTHROUGH;
9510 case NEON::BI__builtin_neon_vcvts_f32_s32:
9511 case NEON::BI__builtin_neon_vcvtd_f64_s64: {
9512 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9513 bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
9514 llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
9515 llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
9516 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
9517 if (usgn)
9518 return Builder.CreateUIToFP(Ops[0], FTy);
9519 return Builder.CreateSIToFP(Ops[0], FTy);
9520 }
9521 case NEON::BI__builtin_neon_vcvth_f16_u16:
9522 case NEON::BI__builtin_neon_vcvth_f16_u32:
9523 case NEON::BI__builtin_neon_vcvth_f16_u64:
9524 usgn = true;
9525 LLVM_FALLTHROUGH;
9526 case NEON::BI__builtin_neon_vcvth_f16_s16:
9527 case NEON::BI__builtin_neon_vcvth_f16_s32:
9528 case NEON::BI__builtin_neon_vcvth_f16_s64: {
9529 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9530 llvm::Type *FTy = HalfTy;
9531 llvm::Type *InTy;
9532 if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64)
9533 InTy = Int64Ty;
9534 else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32)
9535 InTy = Int32Ty;
9536 else
9537 InTy = Int16Ty;
9538 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
9539 if (usgn)
9540 return Builder.CreateUIToFP(Ops[0], FTy);
9541 return Builder.CreateSIToFP(Ops[0], FTy);
9542 }
9543 case NEON::BI__builtin_neon_vcvtah_u16_f16:
9544 case NEON::BI__builtin_neon_vcvtmh_u16_f16:
9545 case NEON::BI__builtin_neon_vcvtnh_u16_f16:
9546 case NEON::BI__builtin_neon_vcvtph_u16_f16:
9547 case NEON::BI__builtin_neon_vcvth_u16_f16:
9548 case NEON::BI__builtin_neon_vcvtah_s16_f16:
9549 case NEON::BI__builtin_neon_vcvtmh_s16_f16:
9550 case NEON::BI__builtin_neon_vcvtnh_s16_f16:
9551 case NEON::BI__builtin_neon_vcvtph_s16_f16:
9552 case NEON::BI__builtin_neon_vcvth_s16_f16: {
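    // These conversions are emitted through the i32 forms of the fcvt
    // intrinsics; the result is truncated back to i16 afterwards.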
9553 unsigned Int;
9554 llvm::Type* InTy = Int32Ty;
9555 llvm::Type* FTy = HalfTy;
9556 llvm::Type *Tys[2] = {InTy, FTy};
9557 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9558 switch (BuiltinID) {
9559 default: llvm_unreachable("missing builtin ID in switch!");
9560 case NEON::BI__builtin_neon_vcvtah_u16_f16:
9561 Int = Intrinsic::aarch64_neon_fcvtau; break;
9562 case NEON::BI__builtin_neon_vcvtmh_u16_f16:
9563 Int = Intrinsic::aarch64_neon_fcvtmu; break;
9564 case NEON::BI__builtin_neon_vcvtnh_u16_f16:
9565 Int = Intrinsic::aarch64_neon_fcvtnu; break;
9566 case NEON::BI__builtin_neon_vcvtph_u16_f16:
9567 Int = Intrinsic::aarch64_neon_fcvtpu; break;
9568 case NEON::BI__builtin_neon_vcvth_u16_f16:
9569 Int = Intrinsic::aarch64_neon_fcvtzu; break;
9570 case NEON::BI__builtin_neon_vcvtah_s16_f16:
9571 Int = Intrinsic::aarch64_neon_fcvtas; break;
9572 case NEON::BI__builtin_neon_vcvtmh_s16_f16:
9573 Int = Intrinsic::aarch64_neon_fcvtms; break;
9574 case NEON::BI__builtin_neon_vcvtnh_s16_f16:
9575 Int = Intrinsic::aarch64_neon_fcvtns; break;
9576 case NEON::BI__builtin_neon_vcvtph_s16_f16:
9577 Int = Intrinsic::aarch64_neon_fcvtps; break;
9578 case NEON::BI__builtin_neon_vcvth_s16_f16:
9579 Int = Intrinsic::aarch64_neon_fcvtzs; break;
9580 }
9581 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt");
9582 return Builder.CreateTrunc(Ops[0], Int16Ty);
9583 }
9584 case NEON::BI__builtin_neon_vcaleh_f16:
9585 case NEON::BI__builtin_neon_vcalth_f16:
9586 case NEON::BI__builtin_neon_vcageh_f16:
9587 case NEON::BI__builtin_neon_vcagth_f16: {
9588 unsigned Int;
9589 llvm::Type* InTy = Int32Ty;
9590 llvm::Type* FTy = HalfTy;
9591 llvm::Type *Tys[2] = {InTy, FTy};
9592 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9593 switch (BuiltinID) {
9594 default: llvm_unreachable("missing builtin ID in switch!");
9595 case NEON::BI__builtin_neon_vcageh_f16:
9596 Int = Intrinsic::aarch64_neon_facge; break;
9597 case NEON::BI__builtin_neon_vcagth_f16:
9598 Int = Intrinsic::aarch64_neon_facgt; break;
9599 case NEON::BI__builtin_neon_vcaleh_f16:
9600 Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break;
9601 case NEON::BI__builtin_neon_vcalth_f16:
9602 Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break;
9603 }
9604 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg");
9605 return Builder.CreateTrunc(Ops[0], Int16Ty);
9606 }
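  // The _n_ variants convert between floating point and fixed point, with
  // the number of fractional bits given by the last argument.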
9607 case NEON::BI__builtin_neon_vcvth_n_s16_f16:
9608 case NEON::BI__builtin_neon_vcvth_n_u16_f16: {
9609 unsigned Int;
9610 llvm::Type* InTy = Int32Ty;
9611 llvm::Type* FTy = HalfTy;
9612 llvm::Type *Tys[2] = {InTy, FTy};
9613 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9614 switch (BuiltinID) {
9615 default: llvm_unreachable("missing builtin ID in switch!");
9616 case NEON::BI__builtin_neon_vcvth_n_s16_f16:
9617 Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break;
9618 case NEON::BI__builtin_neon_vcvth_n_u16_f16:
9619 Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break;
9620 }
9621 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
9622 return Builder.CreateTrunc(Ops[0], Int16Ty);
9623 }
9624 case NEON::BI__builtin_neon_vcvth_n_f16_s16:
9625 case NEON::BI__builtin_neon_vcvth_n_f16_u16: {
9626 unsigned Int;
9627 llvm::Type* FTy = HalfTy;
9628 llvm::Type* InTy = Int32Ty;
9629 llvm::Type *Tys[2] = {FTy, InTy};
9630 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9631 switch (BuiltinID) {
9632 default: llvm_unreachable("missing builtin ID in switch!");
9633 case NEON::BI__builtin_neon_vcvth_n_f16_s16:
9634 Int = Intrinsic::aarch64_neon_vcvtfxs2fp;
9635 Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext");
9636 break;
9637 case NEON::BI__builtin_neon_vcvth_n_f16_u16:
9638 Int = Intrinsic::aarch64_neon_vcvtfxu2fp;
9639 Ops[0] = Builder.CreateZExt(Ops[0], InTy);
9640 break;
9641 }
9642 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
9643 }
9644 case NEON::BI__builtin_neon_vpaddd_s64: {
9645 auto *Ty = llvm::FixedVectorType::get(Int64Ty, 2);
9646 Value *Vec = EmitScalarExpr(E->getArg(0));
    // The vector is v2i64, so make sure it's bitcast to that.
9648 Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
9649 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
9650 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
9651 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
9652 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
    // Pairwise addition of a v2i64 into a scalar i64.
9654 return Builder.CreateAdd(Op0, Op1, "vpaddd");
9655 }
9656 case NEON::BI__builtin_neon_vpaddd_f64: {
9657 auto *Ty = llvm::FixedVectorType::get(DoubleTy, 2);
9658 Value *Vec = EmitScalarExpr(E->getArg(0));
9659 // The vector is v2f64, so make sure it's bitcast to that.
9660 Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
9661 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
9662 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
9663 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
9664 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
9665 // Pairwise addition of a v2f64 into a scalar f64.
9666 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
9667 }
9668 case NEON::BI__builtin_neon_vpadds_f32: {
9669 auto *Ty = llvm::FixedVectorType::get(FloatTy, 2);
9670 Value *Vec = EmitScalarExpr(E->getArg(0));
9671 // The vector is v2f32, so make sure it's bitcast to that.
9672 Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
9673 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
9674 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
9675 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
9676 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
9677 // Pairwise addition of a v2f32 into a scalar f32.
9678 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
9679 }
9680 case NEON::BI__builtin_neon_vceqzd_s64:
9681 case NEON::BI__builtin_neon_vceqzd_f64:
9682 case NEON::BI__builtin_neon_vceqzs_f32:
9683 case NEON::BI__builtin_neon_vceqzh_f16:
9684 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9685 return EmitAArch64CompareBuiltinExpr(
9686 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9687 ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz");
9688 case NEON::BI__builtin_neon_vcgezd_s64:
9689 case NEON::BI__builtin_neon_vcgezd_f64:
9690 case NEON::BI__builtin_neon_vcgezs_f32:
9691 case NEON::BI__builtin_neon_vcgezh_f16:
9692 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9693 return EmitAArch64CompareBuiltinExpr(
9694 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9695 ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez");
9696 case NEON::BI__builtin_neon_vclezd_s64:
9697 case NEON::BI__builtin_neon_vclezd_f64:
9698 case NEON::BI__builtin_neon_vclezs_f32:
9699 case NEON::BI__builtin_neon_vclezh_f16:
9700 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9701 return EmitAArch64CompareBuiltinExpr(
9702 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9703 ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez");
9704 case NEON::BI__builtin_neon_vcgtzd_s64:
9705 case NEON::BI__builtin_neon_vcgtzd_f64:
9706 case NEON::BI__builtin_neon_vcgtzs_f32:
9707 case NEON::BI__builtin_neon_vcgtzh_f16:
9708 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9709 return EmitAArch64CompareBuiltinExpr(
9710 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9711 ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz");
9712 case NEON::BI__builtin_neon_vcltzd_s64:
9713 case NEON::BI__builtin_neon_vcltzd_f64:
9714 case NEON::BI__builtin_neon_vcltzs_f32:
9715 case NEON::BI__builtin_neon_vcltzh_f16:
9716 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9717 return EmitAArch64CompareBuiltinExpr(
9718 Ops[0], ConvertType(E->getCallReturnType(getContext())),
9719 ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz");
9720
9721 case NEON::BI__builtin_neon_vceqzd_u64: {
9722 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9723 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
9724 Ops[0] =
9725 Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty));
9726 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd");
9727 }
9728 case NEON::BI__builtin_neon_vceqd_f64:
9729 case NEON::BI__builtin_neon_vcled_f64:
9730 case NEON::BI__builtin_neon_vcltd_f64:
9731 case NEON::BI__builtin_neon_vcged_f64:
9732 case NEON::BI__builtin_neon_vcgtd_f64: {
9733 llvm::CmpInst::Predicate P;
9734 switch (BuiltinID) {
9735 default: llvm_unreachable("missing builtin ID in switch!");
9736 case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
9737 case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
9738 case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
9739 case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
9740 case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
9741 }
9742 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9743 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
9744 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
9745 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
9746 return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
9747 }
9748 case NEON::BI__builtin_neon_vceqs_f32:
9749 case NEON::BI__builtin_neon_vcles_f32:
9750 case NEON::BI__builtin_neon_vclts_f32:
9751 case NEON::BI__builtin_neon_vcges_f32:
9752 case NEON::BI__builtin_neon_vcgts_f32: {
9753 llvm::CmpInst::Predicate P;
9754 switch (BuiltinID) {
9755 default: llvm_unreachable("missing builtin ID in switch!");
9756 case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
9757 case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
9758 case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
9759 case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
9760 case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
9761 }
9762 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9763 Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
9764 Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
9765 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
9766 return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
9767 }
9768 case NEON::BI__builtin_neon_vceqh_f16:
9769 case NEON::BI__builtin_neon_vcleh_f16:
9770 case NEON::BI__builtin_neon_vclth_f16:
9771 case NEON::BI__builtin_neon_vcgeh_f16:
9772 case NEON::BI__builtin_neon_vcgth_f16: {
9773 llvm::CmpInst::Predicate P;
9774 switch (BuiltinID) {
9775 default: llvm_unreachable("missing builtin ID in switch!");
9776 case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break;
9777 case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break;
9778 case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break;
9779 case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break;
9780 case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break;
9781 }
9782 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9783 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
9784 Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy);
9785 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
9786 return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd");
9787 }
9788 case NEON::BI__builtin_neon_vceqd_s64:
9789 case NEON::BI__builtin_neon_vceqd_u64:
9790 case NEON::BI__builtin_neon_vcgtd_s64:
9791 case NEON::BI__builtin_neon_vcgtd_u64:
9792 case NEON::BI__builtin_neon_vcltd_s64:
9793 case NEON::BI__builtin_neon_vcltd_u64:
9794 case NEON::BI__builtin_neon_vcged_u64:
9795 case NEON::BI__builtin_neon_vcged_s64:
9796 case NEON::BI__builtin_neon_vcled_u64:
9797 case NEON::BI__builtin_neon_vcled_s64: {
9798 llvm::CmpInst::Predicate P;
9799 switch (BuiltinID) {
9800 default: llvm_unreachable("missing builtin ID in switch!");
9801 case NEON::BI__builtin_neon_vceqd_s64:
9802 case NEON::BI__builtin_neon_vceqd_u64:P = llvm::ICmpInst::ICMP_EQ;break;
9803 case NEON::BI__builtin_neon_vcgtd_s64:P = llvm::ICmpInst::ICMP_SGT;break;
9804 case NEON::BI__builtin_neon_vcgtd_u64:P = llvm::ICmpInst::ICMP_UGT;break;
9805 case NEON::BI__builtin_neon_vcltd_s64:P = llvm::ICmpInst::ICMP_SLT;break;
9806 case NEON::BI__builtin_neon_vcltd_u64:P = llvm::ICmpInst::ICMP_ULT;break;
9807 case NEON::BI__builtin_neon_vcged_u64:P = llvm::ICmpInst::ICMP_UGE;break;
9808 case NEON::BI__builtin_neon_vcged_s64:P = llvm::ICmpInst::ICMP_SGE;break;
9809 case NEON::BI__builtin_neon_vcled_u64:P = llvm::ICmpInst::ICMP_ULE;break;
9810 case NEON::BI__builtin_neon_vcled_s64:P = llvm::ICmpInst::ICMP_SLE;break;
9811 }
9812 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9813 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
9814 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
9815 Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
9816 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd");
9817 }
9818 case NEON::BI__builtin_neon_vtstd_s64:
9819 case NEON::BI__builtin_neon_vtstd_u64: {
9820 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9821 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
9822 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
9823 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
9824 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
9825 llvm::Constant::getNullValue(Int64Ty));
9826 return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd");
9827 }
9828 case NEON::BI__builtin_neon_vset_lane_i8:
9829 case NEON::BI__builtin_neon_vset_lane_i16:
9830 case NEON::BI__builtin_neon_vset_lane_i32:
9831 case NEON::BI__builtin_neon_vset_lane_i64:
9832 case NEON::BI__builtin_neon_vset_lane_bf16:
9833 case NEON::BI__builtin_neon_vset_lane_f32:
9834 case NEON::BI__builtin_neon_vsetq_lane_i8:
9835 case NEON::BI__builtin_neon_vsetq_lane_i16:
9836 case NEON::BI__builtin_neon_vsetq_lane_i32:
9837 case NEON::BI__builtin_neon_vsetq_lane_i64:
9838 case NEON::BI__builtin_neon_vsetq_lane_bf16:
9839 case NEON::BI__builtin_neon_vsetq_lane_f32:
9840 Ops.push_back(EmitScalarExpr(E->getArg(2)));
9841 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
9842 case NEON::BI__builtin_neon_vset_lane_f64:
9843 // The vector type needs a cast for the v1f64 variant.
9844 Ops[1] =
9845 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 1));
9846 Ops.push_back(EmitScalarExpr(E->getArg(2)));
9847 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
9848 case NEON::BI__builtin_neon_vsetq_lane_f64:
9849 // The vector type needs a cast for the v2f64 variant.
9850 Ops[1] =
9851 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 2));
9852 Ops.push_back(EmitScalarExpr(E->getArg(2)));
9853 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
9854
9855 case NEON::BI__builtin_neon_vget_lane_i8:
9856 case NEON::BI__builtin_neon_vdupb_lane_i8:
9857 Ops[0] =
9858 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 8));
9859 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9860 "vget_lane");
9861 case NEON::BI__builtin_neon_vgetq_lane_i8:
9862 case NEON::BI__builtin_neon_vdupb_laneq_i8:
9863 Ops[0] =
9864 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 16));
9865 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9866 "vgetq_lane");
9867 case NEON::BI__builtin_neon_vget_lane_i16:
9868 case NEON::BI__builtin_neon_vduph_lane_i16:
9869 Ops[0] =
9870 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 4));
9871 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9872 "vget_lane");
9873 case NEON::BI__builtin_neon_vgetq_lane_i16:
9874 case NEON::BI__builtin_neon_vduph_laneq_i16:
9875 Ops[0] =
9876 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 8));
9877 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9878 "vgetq_lane");
9879 case NEON::BI__builtin_neon_vget_lane_i32:
9880 case NEON::BI__builtin_neon_vdups_lane_i32:
9881 Ops[0] =
9882 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 2));
9883 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9884 "vget_lane");
9885 case NEON::BI__builtin_neon_vdups_lane_f32:
9886 Ops[0] =
9887 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
9888 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9889 "vdups_lane");
9890 case NEON::BI__builtin_neon_vgetq_lane_i32:
9891 case NEON::BI__builtin_neon_vdups_laneq_i32:
9892 Ops[0] =
9893 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
9894 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9895 "vgetq_lane");
9896 case NEON::BI__builtin_neon_vget_lane_i64:
9897 case NEON::BI__builtin_neon_vdupd_lane_i64:
9898 Ops[0] =
9899 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 1));
9900 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9901 "vget_lane");
9902 case NEON::BI__builtin_neon_vdupd_lane_f64:
9903 Ops[0] =
9904 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
9905 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9906 "vdupd_lane");
9907 case NEON::BI__builtin_neon_vgetq_lane_i64:
9908 case NEON::BI__builtin_neon_vdupd_laneq_i64:
9909 Ops[0] =
9910 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
9911 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9912 "vgetq_lane");
9913 case NEON::BI__builtin_neon_vget_lane_f32:
9914 Ops[0] =
9915 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
9916 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9917 "vget_lane");
9918 case NEON::BI__builtin_neon_vget_lane_f64:
9919 Ops[0] =
9920 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
9921 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9922 "vget_lane");
9923 case NEON::BI__builtin_neon_vgetq_lane_f32:
9924 case NEON::BI__builtin_neon_vdups_laneq_f32:
9925 Ops[0] =
9926 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 4));
9927 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9928 "vgetq_lane");
9929 case NEON::BI__builtin_neon_vgetq_lane_f64:
9930 case NEON::BI__builtin_neon_vdupd_laneq_f64:
9931 Ops[0] =
9932 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 2));
9933 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
9934 "vgetq_lane");
9935 case NEON::BI__builtin_neon_vaddh_f16:
9936 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9937 return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh");
9938 case NEON::BI__builtin_neon_vsubh_f16:
9939 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9940 return Builder.CreateFSub(Ops[0], Ops[1], "vsubh");
9941 case NEON::BI__builtin_neon_vmulh_f16:
9942 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9943 return Builder.CreateFMul(Ops[0], Ops[1], "vmulh");
9944 case NEON::BI__builtin_neon_vdivh_f16:
9945 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9946 return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
9947 case NEON::BI__builtin_neon_vfmah_f16:
9948 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
9949 return emitCallMaybeConstrainedFPBuiltin(
9950 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
9951 {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
9952 case NEON::BI__builtin_neon_vfmsh_f16: {
9953 // FIXME: This should be an fneg instruction:
9954 Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy);
    Value *Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh");
9956
9957 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
9958 return emitCallMaybeConstrainedFPBuiltin(
9959 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
9960 {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]});
9961 }
9962 case NEON::BI__builtin_neon_vaddd_s64:
9963 case NEON::BI__builtin_neon_vaddd_u64:
9964 return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
9965 case NEON::BI__builtin_neon_vsubd_s64:
9966 case NEON::BI__builtin_neon_vsubd_u64:
9967 return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
9968 case NEON::BI__builtin_neon_vqdmlalh_s16:
9969 case NEON::BI__builtin_neon_vqdmlslh_s16: {
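    // There is no scalar form of SQDMULL for 16-bit elements, so widen the
    // scalars into v4i16 vectors, perform a vector sqdmull, extract lane 0
    // of the v4i32 result, and then saturating-accumulate it.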
9970 SmallVector<Value *, 2> ProductOps;
9971 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
9972 ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
9973 auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
9974 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
9975 ProductOps, "vqdmlXl");
9976 Constant *CI = ConstantInt::get(SizeTy, 0);
9977 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
9978
9979 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
9980 ? Intrinsic::aarch64_neon_sqadd
9981 : Intrinsic::aarch64_neon_sqsub;
9982 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
9983 }
9984 case NEON::BI__builtin_neon_vqshlud_n_s64: {
9985 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9986 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
9987 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
9988 Ops, "vqshlu_n");
9989 }
9990 case NEON::BI__builtin_neon_vqshld_n_u64:
9991 case NEON::BI__builtin_neon_vqshld_n_s64: {
9992 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
9993 ? Intrinsic::aarch64_neon_uqshl
9994 : Intrinsic::aarch64_neon_sqshl;
9995 Ops.push_back(EmitScalarExpr(E->getArg(1)));
9996 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
9997 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
9998 }
9999 case NEON::BI__builtin_neon_vrshrd_n_u64:
10000 case NEON::BI__builtin_neon_vrshrd_n_s64: {
10001 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
10002 ? Intrinsic::aarch64_neon_urshl
10003 : Intrinsic::aarch64_neon_srshl;
10004 Ops.push_back(EmitScalarExpr(E->getArg(1)));
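    // A rounding shift right by N is emitted as a rounding shift left
    // (s/urshl) by -N.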
10005 int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
10006 Ops[1] = ConstantInt::get(Int64Ty, -SV);
10007 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
10008 }
10009 case NEON::BI__builtin_neon_vrsrad_n_u64:
10010 case NEON::BI__builtin_neon_vrsrad_n_s64: {
10011 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
10012 ? Intrinsic::aarch64_neon_urshl
10013 : Intrinsic::aarch64_neon_srshl;
10014 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
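    // As for vrshrd_n, negate the shift amount so the rounding shift-left
    // intrinsic performs a right shift.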
10015 Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
10016 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty),
10017 {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)});
10018 return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
10019 }
10020 case NEON::BI__builtin_neon_vshld_n_s64:
10021 case NEON::BI__builtin_neon_vshld_n_u64: {
10022 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
10023 return Builder.CreateShl(
10024 Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
10025 }
10026 case NEON::BI__builtin_neon_vshrd_n_s64: {
10027 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
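    // An AShr by the full bit width is poison in LLVM IR; clamping the
    // shift amount to 63 produces the same all-sign-bits result.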
10028 return Builder.CreateAShr(
10029 Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
10030 Amt->getZExtValue())),
10031 "shrd_n");
10032 }
10033 case NEON::BI__builtin_neon_vshrd_n_u64: {
10034 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
10035 uint64_t ShiftAmt = Amt->getZExtValue();
10036 // Right-shifting an unsigned value by its size yields 0.
10037 if (ShiftAmt == 64)
10038 return ConstantInt::get(Int64Ty, 0);
10039 return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
10040 "shrd_n");
10041 }
10042 case NEON::BI__builtin_neon_vsrad_n_s64: {
10043 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
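    // Clamp the shift amount to 63 as in vshrd_n_s64; a shift by 64 would
    // be poison but yields the same result as a shift by 63.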
10044 Ops[1] = Builder.CreateAShr(
10045 Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
10046 Amt->getZExtValue())),
10047 "shrd_n");
10048 return Builder.CreateAdd(Ops[0], Ops[1]);
10049 }
10050 case NEON::BI__builtin_neon_vsrad_n_u64: {
10051 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
10052 uint64_t ShiftAmt = Amt->getZExtValue();
10053 // Right-shifting an unsigned value by its size yields 0.
10054 // As Op + 0 = Op, return Ops[0] directly.
10055 if (ShiftAmt == 64)
10056 return Ops[0];
10057 Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
10058 "shrd_n");
10059 return Builder.CreateAdd(Ops[0], Ops[1]);
10060 }
10061 case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
10062 case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
10063 case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
10064 case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
10065 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
10066 "lane");
10067 SmallVector<Value *, 2> ProductOps;
10068 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
10069 ProductOps.push_back(vectorWrapScalar16(Ops[2]));
10070 auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
10071 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
10072 ProductOps, "vqdmlXl");
10073 Constant *CI = ConstantInt::get(SizeTy, 0);
10074 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
10075 Ops.pop_back();
10076
10077 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
10078 BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
10079 ? Intrinsic::aarch64_neon_sqadd
10080 : Intrinsic::aarch64_neon_sqsub;
10081 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
10082 }
10083 case NEON::BI__builtin_neon_vqdmlals_s32:
10084 case NEON::BI__builtin_neon_vqdmlsls_s32: {
10085 SmallVector<Value *, 2> ProductOps;
10086 ProductOps.push_back(Ops[1]);
10087 ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
10088 Ops[1] =
10089 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
10090 ProductOps, "vqdmlXl");
10091
10092 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
10093 ? Intrinsic::aarch64_neon_sqadd
10094 : Intrinsic::aarch64_neon_sqsub;
10095 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
10096 }
10097 case NEON::BI__builtin_neon_vqdmlals_lane_s32:
10098 case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
10099 case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
10100 case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
10101 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
10102 "lane");
10103 SmallVector<Value *, 2> ProductOps;
10104 ProductOps.push_back(Ops[1]);
10105 ProductOps.push_back(Ops[2]);
10106 Ops[1] =
10107 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
10108 ProductOps, "vqdmlXl");
10109 Ops.pop_back();
10110
10111 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
10112 BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
10113 ? Intrinsic::aarch64_neon_sqadd
10114 : Intrinsic::aarch64_neon_sqsub;
10115 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
10116 }
10117 case NEON::BI__builtin_neon_vget_lane_bf16:
10118 case NEON::BI__builtin_neon_vduph_lane_bf16:
10119 case NEON::BI__builtin_neon_vduph_lane_f16: {
10120 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10121 "vget_lane");
10122 }
10123 case NEON::BI__builtin_neon_vgetq_lane_bf16:
10124 case NEON::BI__builtin_neon_vduph_laneq_bf16:
10125 case NEON::BI__builtin_neon_vduph_laneq_f16: {
10126 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10127 "vgetq_lane");
10128 }
10129
10130 case AArch64::BI_InterlockedAdd: {
10131 Value *Arg0 = EmitScalarExpr(E->getArg(0));
10132 Value *Arg1 = EmitScalarExpr(E->getArg(1));
10133 AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
10134 AtomicRMWInst::Add, Arg0, Arg1,
10135 llvm::AtomicOrdering::SequentiallyConsistent);
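    // atomicrmw returns the value before the operation, but _InterlockedAdd
    // returns the new value, so add the operand once more.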
10136 return Builder.CreateAdd(RMWI, Arg1);
10137 }
10138 }
10139
10140 llvm::FixedVectorType *VTy = GetNeonType(this, Type);
10141 llvm::Type *Ty = VTy;
10142 if (!Ty)
10143 return nullptr;
10144
10145 // Not all intrinsics handled by the common case work for AArch64 yet, so only
10146 // defer to common code if it's been added to our special map.
10147 Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
10148 AArch64SIMDIntrinsicsProvenSorted);
10149
10150 if (Builtin)
10151 return EmitCommonNeonBuiltinExpr(
10152 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
10153 Builtin->NameHint, Builtin->TypeModifier, E, Ops,
10154 /*never use addresses*/ Address::invalid(), Address::invalid(), Arch);
10155
10156 if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch))
10157 return V;
10158
10159 unsigned Int;
10160 switch (BuiltinID) {
10161 default: return nullptr;
10162 case NEON::BI__builtin_neon_vbsl_v:
10163 case NEON::BI__builtin_neon_vbslq_v: {
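    // Bitwise select: (Ops[0] & Ops[1]) | (~Ops[0] & Ops[2]), performed on
    // the integer representation of the operands.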
10164 llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
10165 Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
10166 Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
10167 Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");
10168
10169 Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
10170 Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
10171 Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
10172 return Builder.CreateBitCast(Ops[0], Ty);
10173 }
10174 case NEON::BI__builtin_neon_vfma_lane_v:
10175 case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
10176 // The ARM builtins (and instructions) have the addend as the first
10177 // operand, but the 'fma' intrinsics have it last. Swap it around here.
10178 Value *Addend = Ops[0];
10179 Value *Multiplicand = Ops[1];
10180 Value *LaneSource = Ops[2];
10181 Ops[0] = Multiplicand;
10182 Ops[1] = LaneSource;
10183 Ops[2] = Addend;
10184
10185 // Now adjust things to handle the lane access.
10186 auto *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v
10187 ? llvm::FixedVectorType::get(VTy->getElementType(),
10188 VTy->getNumElements() / 2)
10189 : VTy;
10190 llvm::Constant *cst = cast<Constant>(Ops[3]);
10191 Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), cst);
10192 Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
10193 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
10194
10195 Ops.pop_back();
10196 Int = Builder.getIsFPConstrained() ? Intrinsic::experimental_constrained_fma
10197 : Intrinsic::fma;
10198 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
10199 }
10200 case NEON::BI__builtin_neon_vfma_laneq_v: {
10201 auto *VTy = cast<llvm::FixedVectorType>(Ty);
10202 // v1f64 fma should be mapped to Neon scalar f64 fma
    if (VTy->getElementType() == DoubleTy) {
10204 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
10205 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
10206 llvm::FixedVectorType *VTy =
10207 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true));
10208 Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
10209 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
10210 Value *Result;
10211 Result = emitCallMaybeConstrainedFPBuiltin(
10212 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma,
10213 DoubleTy, {Ops[1], Ops[2], Ops[0]});
10214 return Builder.CreateBitCast(Result, Ty);
10215 }
10216 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10217 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10218
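    // The laneq form takes its lane from a 128-bit vector with twice the
    // number of elements of the result type.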
10219 auto *STy = llvm::FixedVectorType::get(VTy->getElementType(),
10220 VTy->getNumElements() * 2);
10221 Ops[2] = Builder.CreateBitCast(Ops[2], STy);
10222 Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(),
10223 cast<ConstantInt>(Ops[3]));
10224 Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
10225
10226 return emitCallMaybeConstrainedFPBuiltin(
10227 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
10228 {Ops[2], Ops[1], Ops[0]});
10229 }
10230 case NEON::BI__builtin_neon_vfmaq_laneq_v: {
10231 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10232 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10233
10234 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10235 Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
10236 return emitCallMaybeConstrainedFPBuiltin(
10237 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
10238 {Ops[2], Ops[1], Ops[0]});
10239 }
10240 case NEON::BI__builtin_neon_vfmah_lane_f16:
10241 case NEON::BI__builtin_neon_vfmas_lane_f32:
10242 case NEON::BI__builtin_neon_vfmah_laneq_f16:
10243 case NEON::BI__builtin_neon_vfmas_laneq_f32:
10244 case NEON::BI__builtin_neon_vfmad_lane_f64:
10245 case NEON::BI__builtin_neon_vfmad_laneq_f64: {
10246 Ops.push_back(EmitScalarExpr(E->getArg(3)));
10247 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
10248 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
10249 return emitCallMaybeConstrainedFPBuiltin(
10250 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
10251 {Ops[1], Ops[2], Ops[0]});
10252 }
10253 case NEON::BI__builtin_neon_vmull_v:
10254 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10255 Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
10256 if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
10257 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
10258 case NEON::BI__builtin_neon_vmax_v:
10259 case NEON::BI__builtin_neon_vmaxq_v:
10260 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10261 Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
10262 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
10263 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
10264 case NEON::BI__builtin_neon_vmaxh_f16: {
10265 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10266 Int = Intrinsic::aarch64_neon_fmax;
10267 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax");
10268 }
10269 case NEON::BI__builtin_neon_vmin_v:
10270 case NEON::BI__builtin_neon_vminq_v:
10271 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10272 Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
10273 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
10274 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
10275 case NEON::BI__builtin_neon_vminh_f16: {
10276 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10277 Int = Intrinsic::aarch64_neon_fmin;
10278 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin");
10279 }
10280 case NEON::BI__builtin_neon_vabd_v:
10281 case NEON::BI__builtin_neon_vabdq_v:
10282 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10283 Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
10284 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
10285 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
10286 case NEON::BI__builtin_neon_vpadal_v:
10287 case NEON::BI__builtin_neon_vpadalq_v: {
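    // vpadal (pairwise add and accumulate long) has no dedicated intrinsic
    // here; emit a pairwise long add (s/uaddlp) of the source vector and
    // add the accumulator to the result.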
10288 unsigned ArgElts = VTy->getNumElements();
10289 llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
10290 unsigned BitWidth = EltTy->getBitWidth();
10291 auto *ArgTy = llvm::FixedVectorType::get(
10292 llvm::IntegerType::get(getLLVMContext(), BitWidth / 2), 2 * ArgElts);
10293 llvm::Type* Tys[2] = { VTy, ArgTy };
10294 Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
10295 SmallVector<llvm::Value*, 1> TmpOps;
10296 TmpOps.push_back(Ops[1]);
10297 Function *F = CGM.getIntrinsic(Int, Tys);
10298 llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
10299 llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
10300 return Builder.CreateAdd(tmp, addend);
10301 }
10302 case NEON::BI__builtin_neon_vpmin_v:
10303 case NEON::BI__builtin_neon_vpminq_v:
10304 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10305 Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
10306 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
10307 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
10308 case NEON::BI__builtin_neon_vpmax_v:
10309 case NEON::BI__builtin_neon_vpmaxq_v:
10310 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10311 Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
10312 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
10313 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
10314 case NEON::BI__builtin_neon_vminnm_v:
10315 case NEON::BI__builtin_neon_vminnmq_v:
10316 Int = Intrinsic::aarch64_neon_fminnm;
10317 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
10318 case NEON::BI__builtin_neon_vminnmh_f16:
10319 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10320 Int = Intrinsic::aarch64_neon_fminnm;
10321 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm");
10322 case NEON::BI__builtin_neon_vmaxnm_v:
10323 case NEON::BI__builtin_neon_vmaxnmq_v:
10324 Int = Intrinsic::aarch64_neon_fmaxnm;
10325 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
10326 case NEON::BI__builtin_neon_vmaxnmh_f16:
10327 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10328 Int = Intrinsic::aarch64_neon_fmaxnm;
10329 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm");
10330 case NEON::BI__builtin_neon_vrecpss_f32: {
10331 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10332 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy),
10333 Ops, "vrecps");
10334 }
10335 case NEON::BI__builtin_neon_vrecpsd_f64:
10336 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10337 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy),
10338 Ops, "vrecps");
10339 case NEON::BI__builtin_neon_vrecpsh_f16:
10340 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10341 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy),
10342 Ops, "vrecps");
10343 case NEON::BI__builtin_neon_vqshrun_n_v:
10344 Int = Intrinsic::aarch64_neon_sqshrun;
10345 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
10346 case NEON::BI__builtin_neon_vqrshrun_n_v:
10347 Int = Intrinsic::aarch64_neon_sqrshrun;
10348 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
10349 case NEON::BI__builtin_neon_vqshrn_n_v:
10350 Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
10351 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
10352 case NEON::BI__builtin_neon_vrshrn_n_v:
10353 Int = Intrinsic::aarch64_neon_rshrn;
10354 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
10355 case NEON::BI__builtin_neon_vqrshrn_n_v:
10356 Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
10357 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
10358 case NEON::BI__builtin_neon_vrndah_f16: {
10359 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10360 Int = Builder.getIsFPConstrained()
10361 ? Intrinsic::experimental_constrained_round
10362 : Intrinsic::round;
10363 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda");
10364 }
10365 case NEON::BI__builtin_neon_vrnda_v:
10366 case NEON::BI__builtin_neon_vrndaq_v: {
10367 Int = Builder.getIsFPConstrained()
10368 ? Intrinsic::experimental_constrained_round
10369 : Intrinsic::round;
10370 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
10371 }
10372 case NEON::BI__builtin_neon_vrndih_f16: {
10373 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10374 Int = Builder.getIsFPConstrained()
10375 ? Intrinsic::experimental_constrained_nearbyint
10376 : Intrinsic::nearbyint;
10377 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi");
10378 }
10379 case NEON::BI__builtin_neon_vrndmh_f16: {
10380 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10381 Int = Builder.getIsFPConstrained()
10382 ? Intrinsic::experimental_constrained_floor
10383 : Intrinsic::floor;
10384 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm");
10385 }
10386 case NEON::BI__builtin_neon_vrndm_v:
10387 case NEON::BI__builtin_neon_vrndmq_v: {
10388 Int = Builder.getIsFPConstrained()
10389 ? Intrinsic::experimental_constrained_floor
10390 : Intrinsic::floor;
10391 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
10392 }
10393 case NEON::BI__builtin_neon_vrndnh_f16: {
10394 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10395 Int = Intrinsic::aarch64_neon_frintn;
10396 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn");
10397 }
10398 case NEON::BI__builtin_neon_vrndn_v:
10399 case NEON::BI__builtin_neon_vrndnq_v: {
10400 Int = Intrinsic::aarch64_neon_frintn;
10401 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
10402 }
10403 case NEON::BI__builtin_neon_vrndns_f32: {
10404 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10405 Int = Intrinsic::aarch64_neon_frintn;
10406 return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn");
10407 }
10408 case NEON::BI__builtin_neon_vrndph_f16: {
10409 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10410 Int = Builder.getIsFPConstrained()
10411 ? Intrinsic::experimental_constrained_ceil
10412 : Intrinsic::ceil;
10413 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp");
10414 }
10415 case NEON::BI__builtin_neon_vrndp_v:
10416 case NEON::BI__builtin_neon_vrndpq_v: {
10417 Int = Builder.getIsFPConstrained()
10418 ? Intrinsic::experimental_constrained_ceil
10419 : Intrinsic::ceil;
10420 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
10421 }
10422 case NEON::BI__builtin_neon_vrndxh_f16: {
10423 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10424 Int = Builder.getIsFPConstrained()
10425 ? Intrinsic::experimental_constrained_rint
10426 : Intrinsic::rint;
10427 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx");
10428 }
10429 case NEON::BI__builtin_neon_vrndx_v:
10430 case NEON::BI__builtin_neon_vrndxq_v: {
10431 Int = Builder.getIsFPConstrained()
10432 ? Intrinsic::experimental_constrained_rint
10433 : Intrinsic::rint;
10434 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
10435 }
10436 case NEON::BI__builtin_neon_vrndh_f16: {
10437 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10438 Int = Builder.getIsFPConstrained()
10439 ? Intrinsic::experimental_constrained_trunc
10440 : Intrinsic::trunc;
10441 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
10442 }
10443 case NEON::BI__builtin_neon_vrnd_v:
10444 case NEON::BI__builtin_neon_vrndq_v: {
10445 Int = Builder.getIsFPConstrained()
10446 ? Intrinsic::experimental_constrained_trunc
10447 : Intrinsic::trunc;
10448 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
10449 }
10450 case NEON::BI__builtin_neon_vcvt_f64_v:
10451 case NEON::BI__builtin_neon_vcvtq_f64_v:
10452 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10453 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
10454 return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
10455 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
10456 case NEON::BI__builtin_neon_vcvt_f64_f32: {
10457 assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
10458 "unexpected vcvt_f64_f32 builtin");
10459 NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
10460 Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
10461
10462 return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
10463 }
10464 case NEON::BI__builtin_neon_vcvt_f32_f64: {
10465 assert(Type.getEltType() == NeonTypeFlags::Float32 &&
10466 "unexpected vcvt_f32_f64 builtin");
10467 NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
10468 Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
10469
10470 return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
10471 }
10472 case NEON::BI__builtin_neon_vcvt_s32_v:
10473 case NEON::BI__builtin_neon_vcvt_u32_v:
10474 case NEON::BI__builtin_neon_vcvt_s64_v:
10475 case NEON::BI__builtin_neon_vcvt_u64_v:
10476 case NEON::BI__builtin_neon_vcvt_s16_v:
10477 case NEON::BI__builtin_neon_vcvt_u16_v:
10478 case NEON::BI__builtin_neon_vcvtq_s32_v:
10479 case NEON::BI__builtin_neon_vcvtq_u32_v:
10480 case NEON::BI__builtin_neon_vcvtq_s64_v:
10481 case NEON::BI__builtin_neon_vcvtq_u64_v:
10482 case NEON::BI__builtin_neon_vcvtq_s16_v:
10483 case NEON::BI__builtin_neon_vcvtq_u16_v: {
10484 Int =
10485 usgn ? Intrinsic::aarch64_neon_fcvtzu : Intrinsic::aarch64_neon_fcvtzs;
10486 llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)};
10487 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtz");
10488 }
10489 case NEON::BI__builtin_neon_vcvta_s16_v:
10490 case NEON::BI__builtin_neon_vcvta_u16_v:
10491 case NEON::BI__builtin_neon_vcvta_s32_v:
10492 case NEON::BI__builtin_neon_vcvtaq_s16_v:
10493 case NEON::BI__builtin_neon_vcvtaq_s32_v:
10494 case NEON::BI__builtin_neon_vcvta_u32_v:
10495 case NEON::BI__builtin_neon_vcvtaq_u16_v:
10496 case NEON::BI__builtin_neon_vcvtaq_u32_v:
10497 case NEON::BI__builtin_neon_vcvta_s64_v:
10498 case NEON::BI__builtin_neon_vcvtaq_s64_v:
10499 case NEON::BI__builtin_neon_vcvta_u64_v:
10500 case NEON::BI__builtin_neon_vcvtaq_u64_v: {
10501 Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
10502 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10503 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
10504 }
10505 case NEON::BI__builtin_neon_vcvtm_s16_v:
10506 case NEON::BI__builtin_neon_vcvtm_s32_v:
10507 case NEON::BI__builtin_neon_vcvtmq_s16_v:
10508 case NEON::BI__builtin_neon_vcvtmq_s32_v:
10509 case NEON::BI__builtin_neon_vcvtm_u16_v:
10510 case NEON::BI__builtin_neon_vcvtm_u32_v:
10511 case NEON::BI__builtin_neon_vcvtmq_u16_v:
10512 case NEON::BI__builtin_neon_vcvtmq_u32_v:
10513 case NEON::BI__builtin_neon_vcvtm_s64_v:
10514 case NEON::BI__builtin_neon_vcvtmq_s64_v:
10515 case NEON::BI__builtin_neon_vcvtm_u64_v:
10516 case NEON::BI__builtin_neon_vcvtmq_u64_v: {
10517 Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
10518 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10519 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
10520 }
10521 case NEON::BI__builtin_neon_vcvtn_s16_v:
10522 case NEON::BI__builtin_neon_vcvtn_s32_v:
10523 case NEON::BI__builtin_neon_vcvtnq_s16_v:
10524 case NEON::BI__builtin_neon_vcvtnq_s32_v:
10525 case NEON::BI__builtin_neon_vcvtn_u16_v:
10526 case NEON::BI__builtin_neon_vcvtn_u32_v:
10527 case NEON::BI__builtin_neon_vcvtnq_u16_v:
10528 case NEON::BI__builtin_neon_vcvtnq_u32_v:
10529 case NEON::BI__builtin_neon_vcvtn_s64_v:
10530 case NEON::BI__builtin_neon_vcvtnq_s64_v:
10531 case NEON::BI__builtin_neon_vcvtn_u64_v:
10532 case NEON::BI__builtin_neon_vcvtnq_u64_v: {
10533 Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
10534 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10535 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
10536 }
10537 case NEON::BI__builtin_neon_vcvtp_s16_v:
10538 case NEON::BI__builtin_neon_vcvtp_s32_v:
10539 case NEON::BI__builtin_neon_vcvtpq_s16_v:
10540 case NEON::BI__builtin_neon_vcvtpq_s32_v:
10541 case NEON::BI__builtin_neon_vcvtp_u16_v:
10542 case NEON::BI__builtin_neon_vcvtp_u32_v:
10543 case NEON::BI__builtin_neon_vcvtpq_u16_v:
10544 case NEON::BI__builtin_neon_vcvtpq_u32_v:
10545 case NEON::BI__builtin_neon_vcvtp_s64_v:
10546 case NEON::BI__builtin_neon_vcvtpq_s64_v:
10547 case NEON::BI__builtin_neon_vcvtp_u64_v:
10548 case NEON::BI__builtin_neon_vcvtpq_u64_v: {
10549 Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
10550 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10551 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
10552 }
10553 case NEON::BI__builtin_neon_vmulx_v:
10554 case NEON::BI__builtin_neon_vmulxq_v: {
10555 Int = Intrinsic::aarch64_neon_fmulx;
10556 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
10557 }
10558 case NEON::BI__builtin_neon_vmulxh_lane_f16:
10559 case NEON::BI__builtin_neon_vmulxh_laneq_f16: {
10560 // vmulx_lane should be mapped to Neon scalar mulx after
10561 // extracting the scalar element
10562 Ops.push_back(EmitScalarExpr(E->getArg(2)));
10563 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
10564 Ops.pop_back();
10565 Int = Intrinsic::aarch64_neon_fmulx;
10566 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx");
10567 }
10568 case NEON::BI__builtin_neon_vmul_lane_v:
10569 case NEON::BI__builtin_neon_vmul_laneq_v: {
10570 // v1f64 vmul_lane should be mapped to Neon scalar mul lane
    bool Quad = (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v);
10574 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
10575 llvm::FixedVectorType *VTy =
10576 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
10577 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
10578 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
10579 Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
10580 return Builder.CreateBitCast(Result, Ty);
10581 }
10582 case NEON::BI__builtin_neon_vnegd_s64:
10583 return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
10584 case NEON::BI__builtin_neon_vnegh_f16:
10585 return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh");
10586 case NEON::BI__builtin_neon_vpmaxnm_v:
10587 case NEON::BI__builtin_neon_vpmaxnmq_v: {
10588 Int = Intrinsic::aarch64_neon_fmaxnmp;
10589 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
10590 }
10591 case NEON::BI__builtin_neon_vpminnm_v:
10592 case NEON::BI__builtin_neon_vpminnmq_v: {
10593 Int = Intrinsic::aarch64_neon_fminnmp;
10594 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
10595 }
10596 case NEON::BI__builtin_neon_vsqrth_f16: {
10597 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10598 Int = Builder.getIsFPConstrained()
10599 ? Intrinsic::experimental_constrained_sqrt
10600 : Intrinsic::sqrt;
10601 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
10602 }
10603 case NEON::BI__builtin_neon_vsqrt_v:
10604 case NEON::BI__builtin_neon_vsqrtq_v: {
10605 Int = Builder.getIsFPConstrained()
10606 ? Intrinsic::experimental_constrained_sqrt
10607 : Intrinsic::sqrt;
10608 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10609 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
10610 }
10611 case NEON::BI__builtin_neon_vrbit_v:
10612 case NEON::BI__builtin_neon_vrbitq_v: {
10613 Int = Intrinsic::aarch64_neon_rbit;
10614 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
10615 }
10616 case NEON::BI__builtin_neon_vaddv_u8:
10617 // FIXME: These are handled by the AArch64 scalar code.
10618 usgn = true;
10619 LLVM_FALLTHROUGH;
10620 case NEON::BI__builtin_neon_vaddv_s8: {
10621 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
10622 Ty = Int32Ty;
10623 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10624 llvm::Type *Tys[2] = { Ty, VTy };
10625 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10626 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
10627 return Builder.CreateTrunc(Ops[0], Int8Ty);
10628 }
10629 case NEON::BI__builtin_neon_vaddv_u16:
10630 usgn = true;
10631 LLVM_FALLTHROUGH;
10632 case NEON::BI__builtin_neon_vaddv_s16: {
10633 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
10634 Ty = Int32Ty;
10635 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10636 llvm::Type *Tys[2] = { Ty, VTy };
10637 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10638 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
10639 return Builder.CreateTrunc(Ops[0], Int16Ty);
10640 }
10641 case NEON::BI__builtin_neon_vaddvq_u8:
10642 usgn = true;
10643 LLVM_FALLTHROUGH;
10644 case NEON::BI__builtin_neon_vaddvq_s8: {
10645 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
10646 Ty = Int32Ty;
10647 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10648 llvm::Type *Tys[2] = { Ty, VTy };
10649 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10650 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
10651 return Builder.CreateTrunc(Ops[0], Int8Ty);
10652 }
10653 case NEON::BI__builtin_neon_vaddvq_u16:
10654 usgn = true;
10655 LLVM_FALLTHROUGH;
10656 case NEON::BI__builtin_neon_vaddvq_s16: {
10657 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
10658 Ty = Int32Ty;
10659 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10660 llvm::Type *Tys[2] = { Ty, VTy };
10661 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10662 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
10663 return Builder.CreateTrunc(Ops[0], Int16Ty);
10664 }
10665 case NEON::BI__builtin_neon_vmaxv_u8: {
10666 Int = Intrinsic::aarch64_neon_umaxv;
10667 Ty = Int32Ty;
10668 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10669 llvm::Type *Tys[2] = { Ty, VTy };
10670 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10671 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10672 return Builder.CreateTrunc(Ops[0], Int8Ty);
10673 }
10674 case NEON::BI__builtin_neon_vmaxv_u16: {
10675 Int = Intrinsic::aarch64_neon_umaxv;
10676 Ty = Int32Ty;
10677 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10678 llvm::Type *Tys[2] = { Ty, VTy };
10679 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10680 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10681 return Builder.CreateTrunc(Ops[0], Int16Ty);
10682 }
10683 case NEON::BI__builtin_neon_vmaxvq_u8: {
10684 Int = Intrinsic::aarch64_neon_umaxv;
10685 Ty = Int32Ty;
10686 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10687 llvm::Type *Tys[2] = { Ty, VTy };
10688 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10689 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10690 return Builder.CreateTrunc(Ops[0], Int8Ty);
10691 }
10692 case NEON::BI__builtin_neon_vmaxvq_u16: {
10693 Int = Intrinsic::aarch64_neon_umaxv;
10694 Ty = Int32Ty;
10695 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10696 llvm::Type *Tys[2] = { Ty, VTy };
10697 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10698 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10699 return Builder.CreateTrunc(Ops[0], Int16Ty);
10700 }
10701 case NEON::BI__builtin_neon_vmaxv_s8: {
10702 Int = Intrinsic::aarch64_neon_smaxv;
10703 Ty = Int32Ty;
10704 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10705 llvm::Type *Tys[2] = { Ty, VTy };
10706 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10707 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10708 return Builder.CreateTrunc(Ops[0], Int8Ty);
10709 }
10710 case NEON::BI__builtin_neon_vmaxv_s16: {
10711 Int = Intrinsic::aarch64_neon_smaxv;
10712 Ty = Int32Ty;
10713 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10714 llvm::Type *Tys[2] = { Ty, VTy };
10715 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10716 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10717 return Builder.CreateTrunc(Ops[0], Int16Ty);
10718 }
10719 case NEON::BI__builtin_neon_vmaxvq_s8: {
10720 Int = Intrinsic::aarch64_neon_smaxv;
10721 Ty = Int32Ty;
10722 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10723 llvm::Type *Tys[2] = { Ty, VTy };
10724 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10725 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10726 return Builder.CreateTrunc(Ops[0], Int8Ty);
10727 }
10728 case NEON::BI__builtin_neon_vmaxvq_s16: {
10729 Int = Intrinsic::aarch64_neon_smaxv;
10730 Ty = Int32Ty;
10731 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10732 llvm::Type *Tys[2] = { Ty, VTy };
10733 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10734 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10735 return Builder.CreateTrunc(Ops[0], Int16Ty);
10736 }
10737 case NEON::BI__builtin_neon_vmaxv_f16: {
10738 Int = Intrinsic::aarch64_neon_fmaxv;
10739 Ty = HalfTy;
10740 VTy = llvm::FixedVectorType::get(HalfTy, 4);
10741 llvm::Type *Tys[2] = { Ty, VTy };
10742 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10743 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10744 return Builder.CreateTrunc(Ops[0], HalfTy);
10745 }
10746 case NEON::BI__builtin_neon_vmaxvq_f16: {
10747 Int = Intrinsic::aarch64_neon_fmaxv;
10748 Ty = HalfTy;
10749 VTy = llvm::FixedVectorType::get(HalfTy, 8);
10750 llvm::Type *Tys[2] = { Ty, VTy };
10751 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10752 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
10753 return Builder.CreateTrunc(Ops[0], HalfTy);
10754 }
10755 case NEON::BI__builtin_neon_vminv_u8: {
10756 Int = Intrinsic::aarch64_neon_uminv;
10757 Ty = Int32Ty;
10758 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10759 llvm::Type *Tys[2] = { Ty, VTy };
10760 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10761 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10762 return Builder.CreateTrunc(Ops[0], Int8Ty);
10763 }
10764 case NEON::BI__builtin_neon_vminv_u16: {
10765 Int = Intrinsic::aarch64_neon_uminv;
10766 Ty = Int32Ty;
10767 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10768 llvm::Type *Tys[2] = { Ty, VTy };
10769 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10770 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10771 return Builder.CreateTrunc(Ops[0], Int16Ty);
10772 }
10773 case NEON::BI__builtin_neon_vminvq_u8: {
10774 Int = Intrinsic::aarch64_neon_uminv;
10775 Ty = Int32Ty;
10776 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10777 llvm::Type *Tys[2] = { Ty, VTy };
10778 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10779 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10780 return Builder.CreateTrunc(Ops[0], Int8Ty);
10781 }
10782 case NEON::BI__builtin_neon_vminvq_u16: {
10783 Int = Intrinsic::aarch64_neon_uminv;
10784 Ty = Int32Ty;
10785 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10786 llvm::Type *Tys[2] = { Ty, VTy };
10787 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10788 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10789 return Builder.CreateTrunc(Ops[0], Int16Ty);
10790 }
10791 case NEON::BI__builtin_neon_vminv_s8: {
10792 Int = Intrinsic::aarch64_neon_sminv;
10793 Ty = Int32Ty;
10794 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10795 llvm::Type *Tys[2] = { Ty, VTy };
10796 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10797 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10798 return Builder.CreateTrunc(Ops[0], Int8Ty);
10799 }
10800 case NEON::BI__builtin_neon_vminv_s16: {
10801 Int = Intrinsic::aarch64_neon_sminv;
10802 Ty = Int32Ty;
10803 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10804 llvm::Type *Tys[2] = { Ty, VTy };
10805 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10806 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10807 return Builder.CreateTrunc(Ops[0], Int16Ty);
10808 }
10809 case NEON::BI__builtin_neon_vminvq_s8: {
10810 Int = Intrinsic::aarch64_neon_sminv;
10811 Ty = Int32Ty;
10812 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10813 llvm::Type *Tys[2] = { Ty, VTy };
10814 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10815 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10816 return Builder.CreateTrunc(Ops[0], Int8Ty);
10817 }
10818 case NEON::BI__builtin_neon_vminvq_s16: {
10819 Int = Intrinsic::aarch64_neon_sminv;
10820 Ty = Int32Ty;
10821 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10822 llvm::Type *Tys[2] = { Ty, VTy };
10823 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10824 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10825 return Builder.CreateTrunc(Ops[0], Int16Ty);
10826 }
10827 case NEON::BI__builtin_neon_vminv_f16: {
10828 Int = Intrinsic::aarch64_neon_fminv;
10829 Ty = HalfTy;
10830 VTy = llvm::FixedVectorType::get(HalfTy, 4);
10831 llvm::Type *Tys[2] = { Ty, VTy };
10832 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10833 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10834 return Builder.CreateTrunc(Ops[0], HalfTy);
10835 }
10836 case NEON::BI__builtin_neon_vminvq_f16: {
10837 Int = Intrinsic::aarch64_neon_fminv;
10838 Ty = HalfTy;
10839 VTy = llvm::FixedVectorType::get(HalfTy, 8);
10840 llvm::Type *Tys[2] = { Ty, VTy };
10841 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10842 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
10843 return Builder.CreateTrunc(Ops[0], HalfTy);
10844 }
10845 case NEON::BI__builtin_neon_vmaxnmv_f16: {
10846 Int = Intrinsic::aarch64_neon_fmaxnmv;
10847 Ty = HalfTy;
10848 VTy = llvm::FixedVectorType::get(HalfTy, 4);
10849 llvm::Type *Tys[2] = { Ty, VTy };
10850 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10851 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
10852 return Builder.CreateTrunc(Ops[0], HalfTy);
10853 }
10854 case NEON::BI__builtin_neon_vmaxnmvq_f16: {
10855 Int = Intrinsic::aarch64_neon_fmaxnmv;
10856 Ty = HalfTy;
10857 VTy = llvm::FixedVectorType::get(HalfTy, 8);
10858 llvm::Type *Tys[2] = { Ty, VTy };
10859 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10860 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
10861 return Builder.CreateTrunc(Ops[0], HalfTy);
10862 }
10863 case NEON::BI__builtin_neon_vminnmv_f16: {
10864 Int = Intrinsic::aarch64_neon_fminnmv;
10865 Ty = HalfTy;
10866 VTy = llvm::FixedVectorType::get(HalfTy, 4);
10867 llvm::Type *Tys[2] = { Ty, VTy };
10868 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10869 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
10870 return Builder.CreateTrunc(Ops[0], HalfTy);
10871 }
10872 case NEON::BI__builtin_neon_vminnmvq_f16: {
10873 Int = Intrinsic::aarch64_neon_fminnmv;
10874 Ty = HalfTy;
10875 VTy = llvm::FixedVectorType::get(HalfTy, 8);
10876 llvm::Type *Tys[2] = { Ty, VTy };
10877 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10878 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
10879 return Builder.CreateTrunc(Ops[0], HalfTy);
10880 }
10881 case NEON::BI__builtin_neon_vmul_n_f64: {
10882 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
10883 Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
10884 return Builder.CreateFMul(Ops[0], RHS);
10885 }
10886 case NEON::BI__builtin_neon_vaddlv_u8: {
10887 Int = Intrinsic::aarch64_neon_uaddlv;
10888 Ty = Int32Ty;
10889 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10890 llvm::Type *Tys[2] = { Ty, VTy };
10891 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10892 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10893 return Builder.CreateTrunc(Ops[0], Int16Ty);
10894 }
10895 case NEON::BI__builtin_neon_vaddlv_u16: {
10896 Int = Intrinsic::aarch64_neon_uaddlv;
10897 Ty = Int32Ty;
10898 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10899 llvm::Type *Tys[2] = { Ty, VTy };
10900 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10901 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10902 }
10903 case NEON::BI__builtin_neon_vaddlvq_u8: {
10904 Int = Intrinsic::aarch64_neon_uaddlv;
10905 Ty = Int32Ty;
10906 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10907 llvm::Type *Tys[2] = { Ty, VTy };
10908 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10909 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10910 return Builder.CreateTrunc(Ops[0], Int16Ty);
10911 }
10912 case NEON::BI__builtin_neon_vaddlvq_u16: {
10913 Int = Intrinsic::aarch64_neon_uaddlv;
10914 Ty = Int32Ty;
10915 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10916 llvm::Type *Tys[2] = { Ty, VTy };
10917 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10918 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10919 }
10920 case NEON::BI__builtin_neon_vaddlv_s8: {
10921 Int = Intrinsic::aarch64_neon_saddlv;
10922 Ty = Int32Ty;
10923 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
10924 llvm::Type *Tys[2] = { Ty, VTy };
10925 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10926 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10927 return Builder.CreateTrunc(Ops[0], Int16Ty);
10928 }
10929 case NEON::BI__builtin_neon_vaddlv_s16: {
10930 Int = Intrinsic::aarch64_neon_saddlv;
10931 Ty = Int32Ty;
10932 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
10933 llvm::Type *Tys[2] = { Ty, VTy };
10934 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10935 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10936 }
10937 case NEON::BI__builtin_neon_vaddlvq_s8: {
10938 Int = Intrinsic::aarch64_neon_saddlv;
10939 Ty = Int32Ty;
10940 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
10941 llvm::Type *Tys[2] = { Ty, VTy };
10942 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10943 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10944 return Builder.CreateTrunc(Ops[0], Int16Ty);
10945 }
10946 case NEON::BI__builtin_neon_vaddlvq_s16: {
10947 Int = Intrinsic::aarch64_neon_saddlv;
10948 Ty = Int32Ty;
10949 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
10950 llvm::Type *Tys[2] = { Ty, VTy };
10951 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10952 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
10953 }
10954 case NEON::BI__builtin_neon_vsri_n_v:
10955 case NEON::BI__builtin_neon_vsriq_n_v: {
10956 Int = Intrinsic::aarch64_neon_vsri;
10957 llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
10958 return EmitNeonCall(Intrin, Ops, "vsri_n");
10959 }
10960 case NEON::BI__builtin_neon_vsli_n_v:
10961 case NEON::BI__builtin_neon_vsliq_n_v: {
10962 Int = Intrinsic::aarch64_neon_vsli;
10963 llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
10964 return EmitNeonCall(Intrin, Ops, "vsli_n");
10965 }
10966 case NEON::BI__builtin_neon_vsra_n_v:
10967 case NEON::BI__builtin_neon_vsraq_n_v:
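    // A shift-right-and-accumulate is lowered as a plain right shift
    // followed by an add.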
10968 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10969 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
10970 return Builder.CreateAdd(Ops[0], Ops[1]);
10971 case NEON::BI__builtin_neon_vrsra_n_v:
10972 case NEON::BI__builtin_neon_vrsraq_n_v: {
10973 Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
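    // Emit the rounding right shift via the rounding *left* shift intrinsic:
    // EmitNeonCall splats and negates the shift amount (the trailing
    // "1, true" arguments below), and the result is then accumulated.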
10974 SmallVector<llvm::Value*,2> TmpOps;
10975 TmpOps.push_back(Ops[1]);
10976 TmpOps.push_back(Ops[2]);
10977 Function* F = CGM.getIntrinsic(Int, Ty);
10978 llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
10979 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
10980 return Builder.CreateAdd(Ops[0], tmp);
10981 }
10982 case NEON::BI__builtin_neon_vld1_v:
10983 case NEON::BI__builtin_neon_vld1q_v: {
10984 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
10985 return Builder.CreateAlignedLoad(VTy, Ops[0], PtrOp0.getAlignment());
10986 }
10987 case NEON::BI__builtin_neon_vst1_v:
10988 case NEON::BI__builtin_neon_vst1q_v:
10989 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
10990 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
10991 return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
10992 case NEON::BI__builtin_neon_vld1_lane_v:
10993 case NEON::BI__builtin_neon_vld1q_lane_v: {
10994 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10995 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
10996 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10997 Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
10998 PtrOp0.getAlignment());
10999 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
11000 }
11001 case NEON::BI__builtin_neon_vld1_dup_v:
11002 case NEON::BI__builtin_neon_vld1q_dup_v: {
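    // Load a single element, insert it into lane 0 of an undef vector, and
    // splat it across all lanes.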
11003 Value *V = UndefValue::get(Ty);
11004 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
11005 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11006 Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
11007 PtrOp0.getAlignment());
11008 llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
11009 Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
11010 return EmitNeonSplat(Ops[0], CI);
11011 }
11012 case NEON::BI__builtin_neon_vst1_lane_v:
11013 case NEON::BI__builtin_neon_vst1q_lane_v:
11014 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11015 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
11016 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
11017 return Builder.CreateAlignedStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty),
11018 PtrOp0.getAlignment());
11019 case NEON::BI__builtin_neon_vld2_v:
11020 case NEON::BI__builtin_neon_vld2q_v: {
11021 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
11022 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
11023 llvm::Type *Tys[2] = { VTy, PTy };
11024 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
11025 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
11026 Ops[0] = Builder.CreateBitCast(Ops[0],
11027 llvm::PointerType::getUnqual(Ops[1]->getType()));
11028 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11029 }
11030 case NEON::BI__builtin_neon_vld3_v:
11031 case NEON::BI__builtin_neon_vld3q_v: {
11032 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
11033 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
11034 llvm::Type *Tys[2] = { VTy, PTy };
11035 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
11036 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
11037 Ops[0] = Builder.CreateBitCast(Ops[0],
11038 llvm::PointerType::getUnqual(Ops[1]->getType()));
11039 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11040 }
11041 case NEON::BI__builtin_neon_vld4_v:
11042 case NEON::BI__builtin_neon_vld4q_v: {
11043 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
11044 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
11045 llvm::Type *Tys[2] = { VTy, PTy };
11046 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
11047 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
11048 Ops[0] = Builder.CreateBitCast(Ops[0],
11049 llvm::PointerType::getUnqual(Ops[1]->getType()));
11050 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11051 }
11052 case NEON::BI__builtin_neon_vld2_dup_v:
11053 case NEON::BI__builtin_neon_vld2q_dup_v: {
11054 llvm::Type *PTy =
11055 llvm::PointerType::getUnqual(VTy->getElementType());
11056 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
11057 llvm::Type *Tys[2] = { VTy, PTy };
11058 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
11059 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
11060 Ops[0] = Builder.CreateBitCast(Ops[0],
11061 llvm::PointerType::getUnqual(Ops[1]->getType()));
11062 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11063 }
11064 case NEON::BI__builtin_neon_vld3_dup_v:
11065 case NEON::BI__builtin_neon_vld3q_dup_v: {
11066 llvm::Type *PTy =
11067 llvm::PointerType::getUnqual(VTy->getElementType());
11068 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
11069 llvm::Type *Tys[2] = { VTy, PTy };
11070 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
11071 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
11072 Ops[0] = Builder.CreateBitCast(Ops[0],
11073 llvm::PointerType::getUnqual(Ops[1]->getType()));
11074 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11075 }
11076 case NEON::BI__builtin_neon_vld4_dup_v:
11077 case NEON::BI__builtin_neon_vld4q_dup_v: {
11078 llvm::Type *PTy =
11079 llvm::PointerType::getUnqual(VTy->getElementType());
11080 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
11081 llvm::Type *Tys[2] = { VTy, PTy };
11082 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
11083 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
11084 Ops[0] = Builder.CreateBitCast(Ops[0],
11085 llvm::PointerType::getUnqual(Ops[1]->getType()));
11086 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11087 }
11088 case NEON::BI__builtin_neon_vld2_lane_v:
11089 case NEON::BI__builtin_neon_vld2q_lane_v: {
11090 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
11091 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
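    // Move the source pointer to the end of the operand list: the ld2lane
    // intrinsic expects (vec, vec, lane, ptr), whereas the builtin operands
    // arrive as (retptr, ptr, vec, vec, lane).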
11092 std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
11093 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11094 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11095 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
11096 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
11097 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
11098 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11099 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11100 }
11101 case NEON::BI__builtin_neon_vld3_lane_v:
11102 case NEON::BI__builtin_neon_vld3q_lane_v: {
11103 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
11104 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
11105 std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
11106 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11107 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11108 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
11109 Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
11110 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
11111 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
11112 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11113 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11114 }
11115 case NEON::BI__builtin_neon_vld4_lane_v:
11116 case NEON::BI__builtin_neon_vld4q_lane_v: {
11117 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
11118 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
11119 std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
11120 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11121 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11122 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
11123 Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
11124 Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty);
11125 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
11126 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
11127 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11128 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11129 }
11130 case NEON::BI__builtin_neon_vst2_v:
11131 case NEON::BI__builtin_neon_vst2q_v: {
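    // Move the destination pointer from the front to the back of the
    // operand list; the st2 intrinsic expects (vec, vec, ptr).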
11132 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
11133 llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
11134 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
11135 Ops, "");
11136 }
11137 case NEON::BI__builtin_neon_vst2_lane_v:
11138 case NEON::BI__builtin_neon_vst2q_lane_v: {
11139 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
11140 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
11141 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
11142 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
11143 Ops, "");
11144 }
11145 case NEON::BI__builtin_neon_vst3_v:
11146 case NEON::BI__builtin_neon_vst3q_v: {
11147 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
11148 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
11149 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
11150 Ops, "");
11151 }
11152 case NEON::BI__builtin_neon_vst3_lane_v:
11153 case NEON::BI__builtin_neon_vst3q_lane_v: {
11154 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
11155 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
11156 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
11157 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
11158 Ops, "");
11159 }
11160 case NEON::BI__builtin_neon_vst4_v:
11161 case NEON::BI__builtin_neon_vst4q_v: {
11162 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
11163 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
11164 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
11165 Ops, "");
11166 }
11167 case NEON::BI__builtin_neon_vst4_lane_v:
11168 case NEON::BI__builtin_neon_vst4q_lane_v: {
11169 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
11170 Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
11171 llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
11172 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
11173 Ops, "");
11174 }
11175 case NEON::BI__builtin_neon_vtrn_v:
11176 case NEON::BI__builtin_neon_vtrnq_v: {
11177 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
11178 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11179 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11180 Value *SV = nullptr;
11181
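    // vi == 0 gathers the even lanes of both sources (a TRN1 pattern);
    // vi == 1 gathers the odd lanes (TRN2). Each half is stored to its own
    // slot of the destination.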
11182 for (unsigned vi = 0; vi != 2; ++vi) {
11183 SmallVector<int, 16> Indices;
11184 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
11185 Indices.push_back(i+vi);
11186 Indices.push_back(i+e+vi);
11187 }
11188 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
11189 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
11190 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
11191 }
11192 return SV;
11193 }
11194 case NEON::BI__builtin_neon_vuzp_v:
11195 case NEON::BI__builtin_neon_vuzpq_v: {
11196 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
11197 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11198 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11199 Value *SV = nullptr;
11200
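    // vi == 0 selects the even-indexed elements of the concatenated sources
    // (a UZP1 pattern); vi == 1 selects the odd-indexed elements (UZP2).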
11201 for (unsigned vi = 0; vi != 2; ++vi) {
11202 SmallVector<int, 16> Indices;
11203 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
11204 Indices.push_back(2*i+vi);
11205
11206 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
11207 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
11208 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
11209 }
11210 return SV;
11211 }
11212 case NEON::BI__builtin_neon_vzip_v:
11213 case NEON::BI__builtin_neon_vzipq_v: {
11214 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
11215 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11216 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11217 Value *SV = nullptr;
11218
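    // vi == 0 interleaves the low halves of the two sources (a ZIP1
    // pattern); vi == 1 interleaves the high halves (ZIP2).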
11219 for (unsigned vi = 0; vi != 2; ++vi) {
11220 SmallVector<int, 16> Indices;
11221 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
11222 Indices.push_back((i + vi*e) >> 1);
11223 Indices.push_back(((i + vi*e) >> 1)+e);
11224 }
11225 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
11226 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
11227 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
11228 }
11229 return SV;
11230 }
11231 case NEON::BI__builtin_neon_vqtbl1q_v: {
11232 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
11233 Ops, "vtbl1");
11234 }
11235 case NEON::BI__builtin_neon_vqtbl2q_v: {
11236 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
11237 Ops, "vtbl2");
11238 }
11239 case NEON::BI__builtin_neon_vqtbl3q_v: {
11240 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
11241 Ops, "vtbl3");
11242 }
11243 case NEON::BI__builtin_neon_vqtbl4q_v: {
11244 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
11245 Ops, "vtbl4");
11246 }
11247 case NEON::BI__builtin_neon_vqtbx1q_v: {
11248 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
11249 Ops, "vtbx1");
11250 }
11251 case NEON::BI__builtin_neon_vqtbx2q_v: {
11252 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
11253 Ops, "vtbx2");
11254 }
11255 case NEON::BI__builtin_neon_vqtbx3q_v: {
11256 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
11257 Ops, "vtbx3");
11258 }
11259 case NEON::BI__builtin_neon_vqtbx4q_v: {
11260 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
11261 Ops, "vtbx4");
11262 }
11263 case NEON::BI__builtin_neon_vsqadd_v:
11264 case NEON::BI__builtin_neon_vsqaddq_v: {
11265 Int = Intrinsic::aarch64_neon_usqadd;
11266 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
11267 }
11268 case NEON::BI__builtin_neon_vuqadd_v:
11269 case NEON::BI__builtin_neon_vuqaddq_v: {
11270 Int = Intrinsic::aarch64_neon_suqadd;
11271 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
11272 }
11273 }
11274}
11275
11276Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
11277 const CallExpr *E) {
11278 assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
11279 BuiltinID == BPF::BI__builtin_btf_type_id ||
11280 BuiltinID == BPF::BI__builtin_preserve_type_info ||
11281 BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
11282 "unexpected BPF builtin");
11283
  // A sequence number, injected into IR builtin functions, to
  // prevent CSE given that the only difference between the functions
  // may be the debuginfo metadata.
11287 static uint32_t BuiltinSeqNum;
11288
11289 switch (BuiltinID) {
11290 default:
11291 llvm_unreachable("Unexpected BPF builtin");
11292 case BPF::BI__builtin_preserve_field_info: {
11293 const Expr *Arg = E->getArg(0);
11294 bool IsBitField = Arg->IgnoreParens()->getObjectKind() == OK_BitField;
11295
11296 if (!getDebugInfo()) {
11297 CGM.Error(E->getExprLoc(),
11298 "using __builtin_preserve_field_info() without -g");
11299 return IsBitField ? EmitLValue(Arg).getBitFieldPointer()
11300 : EmitLValue(Arg).getPointer(*this);
11301 }
11302
11303 // Enable underlying preserve_*_access_index() generation.
11304 bool OldIsInPreservedAIRegion = IsInPreservedAIRegion;
11305 IsInPreservedAIRegion = true;
11306 Value *FieldAddr = IsBitField ? EmitLValue(Arg).getBitFieldPointer()
11307 : EmitLValue(Arg).getPointer(*this);
11308 IsInPreservedAIRegion = OldIsInPreservedAIRegion;
11309
11310 ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
11311 Value *InfoKind = ConstantInt::get(Int64Ty, C->getSExtValue());
11312
    // Build the IR for the preserve_field_info intrinsic.
11314 llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getDeclaration(
11315 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_field_info,
11316 {FieldAddr->getType()});
11317 return Builder.CreateCall(FnGetFieldInfo, {FieldAddr, InfoKind});
11318 }
11319 case BPF::BI__builtin_btf_type_id:
11320 case BPF::BI__builtin_preserve_type_info: {
11321 if (!getDebugInfo()) {
11322 CGM.Error(E->getExprLoc(), "using builtin function without -g");
11323 return nullptr;
11324 }
11325
11326 const Expr *Arg0 = E->getArg(0);
11327 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
11328 Arg0->getType(), Arg0->getExprLoc());
11329
11330 ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
11331 Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
11332 Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);
11333
11334 llvm::Function *FnDecl;
11335 if (BuiltinID == BPF::BI__builtin_btf_type_id)
11336 FnDecl = llvm::Intrinsic::getDeclaration(
11337 &CGM.getModule(), llvm::Intrinsic::bpf_btf_type_id, {});
11338 else
11339 FnDecl = llvm::Intrinsic::getDeclaration(
11340 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_type_info, {});
11341 CallInst *Fn = Builder.CreateCall(FnDecl, {SeqNumVal, FlagValue});
11342 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
11343 return Fn;
11344 }
11345 case BPF::BI__builtin_preserve_enum_value: {
11346 if (!getDebugInfo()) {
11347 CGM.Error(E->getExprLoc(), "using builtin function without -g");
11348 return nullptr;
11349 }
11350
11351 const Expr *Arg0 = E->getArg(0);
11352 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
11353 Arg0->getType(), Arg0->getExprLoc());
11354
    // Find the enumerator: walk through the unary operator and C-style cast
    // down to the enum-constant DeclRefExpr.
11356 const auto *UO = cast<UnaryOperator>(Arg0->IgnoreParens());
11357 const auto *CE = cast<CStyleCastExpr>(UO->getSubExpr());
11358 const auto *DR = cast<DeclRefExpr>(CE->getSubExpr());
11359 const auto *Enumerator = cast<EnumConstantDecl>(DR->getDecl());
11360
11361 auto &InitVal = Enumerator->getInitVal();
11362 std::string InitValStr;
11363 if (InitVal.isNegative() || InitVal > uint64_t(INT64_MAX))
11364 InitValStr = std::to_string(InitVal.getSExtValue());
11365 else
11366 InitValStr = std::to_string(InitVal.getZExtValue());
11367 std::string EnumStr = Enumerator->getNameAsString() + ":" + InitValStr;
11368 Value *EnumStrVal = Builder.CreateGlobalStringPtr(EnumStr);
11369
11370 ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
11371 Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
11372 Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);
11373
11374 llvm::Function *IntrinsicFn = llvm::Intrinsic::getDeclaration(
11375 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_enum_value, {});
11376 CallInst *Fn =
11377 Builder.CreateCall(IntrinsicFn, {SeqNumVal, EnumStrVal, FlagValue});
11378 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
11379 return Fn;
11380 }
11381 }
11382}
11383
11384llvm::Value *CodeGenFunction::
11385BuildVector(ArrayRef<llvm::Value*> Ops) {
11386 assert((Ops.size() & (Ops.size() - 1)) == 0 &&
11387 "Not a power-of-two sized vector!");
11388 bool AllConstants = true;
11389 for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
11390 AllConstants &= isa<Constant>(Ops[i]);
11391
11392 // If this is a constant vector, create a ConstantVector.
11393 if (AllConstants) {
11394 SmallVector<llvm::Constant*, 16> CstOps;
11395 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
11396 CstOps.push_back(cast<Constant>(Ops[i]));
11397 return llvm::ConstantVector::get(CstOps);
11398 }
11399
11400 // Otherwise, insertelement the values to build the vector.
11401 Value *Result = llvm::UndefValue::get(
11402 llvm::FixedVectorType::get(Ops[0]->getType(), Ops.size()));
11403
11404 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
11405 Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
11406
11407 return Result;
11408}
11409
11410// Convert the mask from an integer type to a vector of i1.
11411static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
11412 unsigned NumElts) {
11413
11414 auto *MaskTy = llvm::FixedVectorType::get(
11415 CGF.Builder.getInt1Ty(),
11416 cast<IntegerType>(Mask->getType())->getBitWidth());
11417 Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy);
11418
  // If we have fewer than 8 elements, then the starting mask was an i8 and
  // we need to extract down to the right number of elements.
11421 if (NumElts < 8) {
11422 int Indices[4];
11423 for (unsigned i = 0; i != NumElts; ++i)
11424 Indices[i] = i;
11425 MaskVec = CGF.Builder.CreateShuffleVector(MaskVec, MaskVec,
11426 makeArrayRef(Indices, NumElts),
11427 "extract");
11428 }
11429 return MaskVec;
11430}
11431
11432static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11433 Align Alignment) {
  // Cast the pointer to the right type.
11435 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11436 llvm::PointerType::getUnqual(Ops[1]->getType()));
11437
11438 Value *MaskVec = getMaskVecValue(
11439 CGF, Ops[2],
11440 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());
11441
11442 return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec);
11443}
11444
11445static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11446 Align Alignment) {
  // Cast the pointer to the right type.
11448 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11449 llvm::PointerType::getUnqual(Ops[1]->getType()));
11450
11451 Value *MaskVec = getMaskVecValue(
11452 CGF, Ops[2],
11453 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());
11454
11455 return CGF.Builder.CreateMaskedLoad(Ptr, Alignment, MaskVec, Ops[1]);
11456}
11457
11458static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
11459 ArrayRef<Value *> Ops) {
11460 auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
11461 llvm::Type *PtrTy = ResultTy->getElementType();
11462
  // Cast the pointer to the element type.
11464 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11465 llvm::PointerType::getUnqual(PtrTy));
11466
11467 Value *MaskVec = getMaskVecValue(
11468 CGF, Ops[2], cast<FixedVectorType>(ResultTy)->getNumElements());
11469
11470 llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
11471 ResultTy);
11472 return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] });
11473}
11474
11475static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
11476 ArrayRef<Value *> Ops,
11477 bool IsCompress) {
11478 auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
11479
11480 Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
11481
11482 Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
11483 : Intrinsic::x86_avx512_mask_expand;
11484 llvm::Function *F = CGF.CGM.getIntrinsic(IID, ResultTy);
11485 return CGF.Builder.CreateCall(F, { Ops[0], Ops[1], MaskVec });
11486}
11487
11488static Value *EmitX86CompressStore(CodeGenFunction &CGF,
11489 ArrayRef<Value *> Ops) {
11490 auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
11491 llvm::Type *PtrTy = ResultTy->getElementType();
11492
  // Cast the pointer to the element type.
11494 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11495 llvm::PointerType::getUnqual(PtrTy));
11496
11497 Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
11498
11499 llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore,
11500 ResultTy);
11501 return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec });
11502}
11503
11504static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
11505 ArrayRef<Value *> Ops,
11506 bool InvertLHS = false) {
11507 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11508 Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts);
11509 Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts);
11510
11511 if (InvertLHS)
11512 LHS = CGF.Builder.CreateNot(LHS);
11513
11514 return CGF.Builder.CreateBitCast(CGF.Builder.CreateBinOp(Opc, LHS, RHS),
11515 Ops[0]->getType());
11516}
11517
11518static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1,
11519 Value *Amt, bool IsRight) {
11520 llvm::Type *Ty = Op0->getType();
11521
  // The amount may be a scalar immediate, in which case create a splat
  // vector. Funnel shift amounts are treated as modulo, and the types are
  // all power-of-2, so we only care about the lowest log2 bits anyway.
11525 if (Amt->getType() != Ty) {
11526 unsigned NumElts = cast<llvm::FixedVectorType>(Ty)->getNumElements();
11527 Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
11528 Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt);
11529 }
11530
11531 unsigned IID = IsRight ? Intrinsic::fshr : Intrinsic::fshl;
11532 Function *F = CGF.CGM.getIntrinsic(IID, Ty);
11533 return CGF.Builder.CreateCall(F, {Op0, Op1, Amt});
11534}
11535
11536static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11537 bool IsSigned) {
11538 Value *Op0 = Ops[0];
11539 Value *Op1 = Ops[1];
11540 llvm::Type *Ty = Op0->getType();
11541 uint64_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
11542
11543 CmpInst::Predicate Pred;
11544 switch (Imm) {
11545 case 0x0:
11546 Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
11547 break;
11548 case 0x1:
11549 Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
11550 break;
11551 case 0x2:
11552 Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
11553 break;
11554 case 0x3:
11555 Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
11556 break;
11557 case 0x4:
11558 Pred = ICmpInst::ICMP_EQ;
11559 break;
11560 case 0x5:
11561 Pred = ICmpInst::ICMP_NE;
11562 break;
11563 case 0x6:
11564 return llvm::Constant::getNullValue(Ty); // FALSE
11565 case 0x7:
11566 return llvm::Constant::getAllOnesValue(Ty); // TRUE
11567 default:
11568 llvm_unreachable("Unexpected XOP vpcom/vpcomu predicate");
11569 }
11570
11571 Value *Cmp = CGF.Builder.CreateICmp(Pred, Op0, Op1);
11572 Value *Res = CGF.Builder.CreateSExt(Cmp, Ty);
11573 return Res;
11574}
11575
11576static Value *EmitX86Select(CodeGenFunction &CGF,
11577 Value *Mask, Value *Op0, Value *Op1) {
11578
  // If the mask is all ones, just return the first argument.
11580 if (const auto *C = dyn_cast<Constant>(Mask))
11581 if (C->isAllOnesValue())
11582 return Op0;
11583
11584 Mask = getMaskVecValue(
11585 CGF, Mask, cast<llvm::FixedVectorType>(Op0->getType())->getNumElements());
11586
11587 return CGF.Builder.CreateSelect(Mask, Op0, Op1);
11588}
11589
11590static Value *EmitX86ScalarSelect(CodeGenFunction &CGF,
11591 Value *Mask, Value *Op0, Value *Op1) {
  // If the mask is all ones, just return the first argument.
11593 if (const auto *C = dyn_cast<Constant>(Mask))
11594 if (C->isAllOnesValue())
11595 return Op0;
11596
11597 auto *MaskTy = llvm::FixedVectorType::get(
11598 CGF.Builder.getInt1Ty(), Mask->getType()->getIntegerBitWidth());
11599 Mask = CGF.Builder.CreateBitCast(Mask, MaskTy);
11600 Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0);
11601 return CGF.Builder.CreateSelect(Mask, Op0, Op1);
11602}
11603
11604static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp,
11605 unsigned NumElts, Value *MaskIn) {
11606 if (MaskIn) {
11607 const auto *C = dyn_cast<Constant>(MaskIn);
11608 if (!C || !C->isAllOnesValue())
11609 Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, MaskIn, NumElts));
11610 }
11611
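  // i8 is the narrowest legal mask type, so widen the compare result to at
  // least 8 elements; the padding lanes index into the all-zero second
  // shuffle operand and therefore read as zero.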
11612 if (NumElts < 8) {
11613 int Indices[8];
11614 for (unsigned i = 0; i != NumElts; ++i)
11615 Indices[i] = i;
11616 for (unsigned i = NumElts; i != 8; ++i)
11617 Indices[i] = i % NumElts + NumElts;
11618 Cmp = CGF.Builder.CreateShuffleVector(
11619 Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices);
11620 }
11621
11622 return CGF.Builder.CreateBitCast(Cmp,
11623 IntegerType::get(CGF.getLLVMContext(),
11624 std::max(NumElts, 8U)));
11625}
11626
11627static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
11628 bool Signed, ArrayRef<Value *> Ops) {
11629 assert((Ops.size() == 2 || Ops.size() == 4) &&
11630 "Unexpected number of arguments");
11631 unsigned NumElts =
11632 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
11633 Value *Cmp;
11634
11635 if (CC == 3) {
11636 Cmp = Constant::getNullValue(
11637 llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
11638 } else if (CC == 7) {
11639 Cmp = Constant::getAllOnesValue(
11640 llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
11641 } else {
11642 ICmpInst::Predicate Pred;
11643 switch (CC) {
11644 default: llvm_unreachable("Unknown condition code");
11645 case 0: Pred = ICmpInst::ICMP_EQ; break;
11646 case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
11647 case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
11648 case 4: Pred = ICmpInst::ICMP_NE; break;
11649 case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
11650 case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
11651 }
11652 Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
11653 }
11654
11655 Value *MaskIn = nullptr;
11656 if (Ops.size() == 4)
11657 MaskIn = Ops[3];
11658
11659 return EmitX86MaskedCompareResult(CGF, Cmp, NumElts, MaskIn);
11660}
11661
11662static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) {
11663 Value *Zero = Constant::getNullValue(In->getType());
11664 return EmitX86MaskedCompare(CGF, 1, true, { In, Zero });
11665}
11666
11667static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF,
11668 ArrayRef<Value *> Ops, bool IsSigned) {
11669 unsigned Rnd = cast<llvm::ConstantInt>(Ops[3])->getZExtValue();
11670 llvm::Type *Ty = Ops[1]->getType();
11671
11672 Value *Res;
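  // Rnd == 4 is _MM_FROUND_CUR_DIRECTION; any other value requires the
  // target intrinsic that encodes the rounding mode, otherwise a plain
  // sitofp/uitofp suffices.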
11673 if (Rnd != 4) {
11674 Intrinsic::ID IID = IsSigned ? Intrinsic::x86_avx512_sitofp_round
11675 : Intrinsic::x86_avx512_uitofp_round;
11676 Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() });
11677 Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] });
11678 } else {
11679 Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty)
11680 : CGF.Builder.CreateUIToFP(Ops[0], Ty);
11681 }
11682
11683 return EmitX86Select(CGF, Ops[2], Res, Ops[1]);
11684}
11685
11686// Lowers X86 FMA intrinsics to IR.
11687static Value *EmitX86FMAExpr(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11688 unsigned BuiltinID, bool IsAddSub) {
11689
11690 bool Subtract = false;
11691 Intrinsic::ID IID = Intrinsic::not_intrinsic;
11692 switch (BuiltinID) {
11693 default: break;
11694 case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
11695 Subtract = true;
11696 LLVM_FALLTHROUGH;
11697 case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
11698 case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
11699 case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
11700 IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break;
11701 case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
11702 Subtract = true;
11703 LLVM_FALLTHROUGH;
11704 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
11705 case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
11706 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
11707 IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break;
11708 case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
11709 Subtract = true;
11710 LLVM_FALLTHROUGH;
11711 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
11712 case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
11713 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
11714 IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512;
11715 break;
11716 case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
11717 Subtract = true;
11718 LLVM_FALLTHROUGH;
11719 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
11720 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
11721 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
11722 IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512;
11723 break;
11724 }
11725
11726 Value *A = Ops[0];
11727 Value *B = Ops[1];
11728 Value *C = Ops[2];
11729
11730 if (Subtract)
11731 C = CGF.Builder.CreateFNeg(C);
11732
11733 Value *Res;
11734
  // Use the target intrinsic when a rounding mode other than
  // _MM_FROUND_CUR_DIRECTION/4 is requested, or for addsub, which has no
  // generic IR form; otherwise lower to a plain llvm.fma.
11736 if (IID != Intrinsic::not_intrinsic &&
11737 (cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4 ||
11738 IsAddSub)) {
11739 Function *Intr = CGF.CGM.getIntrinsic(IID);
11740 Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() });
11741 } else {
11742 llvm::Type *Ty = A->getType();
11743 Function *FMA;
11744 if (CGF.Builder.getIsFPConstrained()) {
11745 FMA = CGF.CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, Ty);
11746 Res = CGF.Builder.CreateConstrainedFPCall(FMA, {A, B, C});
11747 } else {
11748 FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
11749 Res = CGF.Builder.CreateCall(FMA, {A, B, C});
11750 }
11751 }
11752
11753 // Handle any required masking.
11754 Value *MaskFalseVal = nullptr;
11755 switch (BuiltinID) {
11756 case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
11757 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
11758 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
11759 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
11760 MaskFalseVal = Ops[0];
11761 break;
11762 case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
11763 case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
11764 case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
11765 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
11766 MaskFalseVal = Constant::getNullValue(Ops[0]->getType());
11767 break;
11768 case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
11769 case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
11770 case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
11771 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
11772 case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
11773 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
11774 case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
11775 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
11776 MaskFalseVal = Ops[2];
11777 break;
11778 }
11779
11780 if (MaskFalseVal)
11781 return EmitX86Select(CGF, Ops[3], Res, MaskFalseVal);
11782
11783 return Res;
11784}
11785
11786static Value *
11787EmitScalarFMAExpr(CodeGenFunction &CGF, MutableArrayRef<Value *> Ops,
11788 Value *Upper, bool ZeroMask = false, unsigned PTIdx = 0,
11789 bool NegAcc = false) {
11790 unsigned Rnd = 4;
11791 if (Ops.size() > 4)
11792 Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
11793
11794 if (NegAcc)
11795 Ops[2] = CGF.Builder.CreateFNeg(Ops[2]);
11796
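  // These builtins operate on the scalar held in lane 0 of each vector
  // operand; extract the scalars here and reinsert the result into Upper at
  // the end.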
11797 Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], (uint64_t)0);
11798 Ops[1] = CGF.Builder.CreateExtractElement(Ops[1], (uint64_t)0);
11799 Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0);
11800 Value *Res;
11801 if (Rnd != 4) {
11802 Intrinsic::ID IID = Ops[0]->getType()->getPrimitiveSizeInBits() == 32 ?
11803 Intrinsic::x86_avx512_vfmadd_f32 :
11804 Intrinsic::x86_avx512_vfmadd_f64;
11805 Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
11806 {Ops[0], Ops[1], Ops[2], Ops[4]});
11807 } else if (CGF.Builder.getIsFPConstrained()) {
11808 Function *FMA = CGF.CGM.getIntrinsic(
11809 Intrinsic::experimental_constrained_fma, Ops[0]->getType());
11810 Res = CGF.Builder.CreateConstrainedFPCall(FMA, Ops.slice(0, 3));
11811 } else {
11812 Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType());
11813 Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3));
11814 }
11815 // If we have more than 3 arguments, we need to do masking.
11816 if (Ops.size() > 3) {
11817 Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType())
11818 : Ops[PTIdx];
11819
    // If we negated the accumulator and it is the PassThru value, we need to
    // bypass the negate. Conveniently, Upper should be the same thing in this
    // case.
11823 if (NegAcc && PTIdx == 2)
11824 PassThru = CGF.Builder.CreateExtractElement(Upper, (uint64_t)0);
11825
11826 Res = EmitX86ScalarSelect(CGF, Ops[3], Res, PassThru);
11827 }
11828 return CGF.Builder.CreateInsertElement(Upper, Res, (uint64_t)0);
11829}
11830
11831static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned,
11832 ArrayRef<Value *> Ops) {
11833 llvm::Type *Ty = Ops[0]->getType();
11834 // Arguments have a vXi32 type so cast to vXi64.
11835 Ty = llvm::FixedVectorType::get(CGF.Int64Ty,
11836 Ty->getPrimitiveSizeInBits() / 64);
11837 Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty);
11838 Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty);
11839
11840 if (IsSigned) {
    // Sign-extend the low 32 bits of each element: shift left, then
    // arithmetic shift right.
11842 Constant *ShiftAmt = ConstantInt::get(Ty, 32);
11843 LHS = CGF.Builder.CreateShl(LHS, ShiftAmt);
11844 LHS = CGF.Builder.CreateAShr(LHS, ShiftAmt);
11845 RHS = CGF.Builder.CreateShl(RHS, ShiftAmt);
11846 RHS = CGF.Builder.CreateAShr(RHS, ShiftAmt);
11847 } else {
11848 // Clear the upper bits.
11849 Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
11850 LHS = CGF.Builder.CreateAnd(LHS, Mask);
11851 RHS = CGF.Builder.CreateAnd(RHS, Mask);
11852 }
11853
11854 return CGF.Builder.CreateMul(LHS, RHS);
11855}
11856
11857// Emit a masked pternlog intrinsic. This only exists because the header has to
11858// use a macro and we aren't able to pass the input argument to a pternlog
11859// builtin and a select builtin without evaluating it twice.
11860static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask,
11861 ArrayRef<Value *> Ops) {
11862 llvm::Type *Ty = Ops[0]->getType();
11863
11864 unsigned VecWidth = Ty->getPrimitiveSizeInBits();
11865 unsigned EltWidth = Ty->getScalarSizeInBits();
11866 Intrinsic::ID IID;
11867 if (VecWidth == 128 && EltWidth == 32)
11868 IID = Intrinsic::x86_avx512_pternlog_d_128;
11869 else if (VecWidth == 256 && EltWidth == 32)
11870 IID = Intrinsic::x86_avx512_pternlog_d_256;
11871 else if (VecWidth == 512 && EltWidth == 32)
11872 IID = Intrinsic::x86_avx512_pternlog_d_512;
11873 else if (VecWidth == 128 && EltWidth == 64)
11874 IID = Intrinsic::x86_avx512_pternlog_q_128;
11875 else if (VecWidth == 256 && EltWidth == 64)
11876 IID = Intrinsic::x86_avx512_pternlog_q_256;
11877 else if (VecWidth == 512 && EltWidth == 64)
11878 IID = Intrinsic::x86_avx512_pternlog_q_512;
11879 else
11880 llvm_unreachable("Unexpected intrinsic");
11881
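  // Ops is (A, B, C, imm, mask): drop the mask for the pternlog call itself
  // and use it afterwards to select between the result and the pass-through
  // value.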
11882 Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
11883 Ops.drop_back());
11884 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0];
11885 return EmitX86Select(CGF, Ops[4], Ternlog, PassThru);
11886}
11887
11888static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
11889 llvm::Type *DstTy) {
11890 unsigned NumberOfElements =
11891 cast<llvm::FixedVectorType>(DstTy)->getNumElements();
11892 Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
11893 return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
11894}
11895
11896// Emit binary intrinsic with the same type used in result/args.
11897static Value *EmitX86BinaryIntrinsic(CodeGenFunction &CGF,
11898 ArrayRef<Value *> Ops, Intrinsic::ID IID) {
11899 llvm::Function *F = CGF.CGM.getIntrinsic(IID, Ops[0]->getType());
11900 return CGF.Builder.CreateCall(F, {Ops[0], Ops[1]});
11901}
11902
11903Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {
11904 const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
11905 StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
11906 return EmitX86CpuIs(CPUStr);
11907}
11908
// Convert F16 halves to floats.
11910static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF,
11911 ArrayRef<Value *> Ops,
11912 llvm::Type *DstTy) {
11913 assert((Ops.size() == 1 || Ops.size() == 3 || Ops.size() == 4) &&
11914 "Unknown cvtph2ps intrinsic");
11915
  // If the SAE intrinsic doesn't use default rounding, then we can't upgrade.
11917 if (Ops.size() == 4 && cast<llvm::ConstantInt>(Ops[3])->getZExtValue() != 4) {
11918 Function *F =
11919 CGF.CGM.getIntrinsic(Intrinsic::x86_avx512_mask_vcvtph2ps_512);
11920 return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]});
11921 }
11922
11923 unsigned NumDstElts = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
11924 Value *Src = Ops[0];
11925
11926 // Extract the subvector.
11927 if (NumDstElts !=
11928 cast<llvm::FixedVectorType>(Src->getType())->getNumElements()) {
11929 assert(NumDstElts == 4 && "Unexpected vector size");
11930 Src = CGF.Builder.CreateShuffleVector(Src, ArrayRef<int>{0, 1, 2, 3});
11931 }
11932
11933 // Bitcast from vXi16 to vXf16.
11934 auto *HalfTy = llvm::FixedVectorType::get(
11935 llvm::Type::getHalfTy(CGF.getLLVMContext()), NumDstElts);
11936 Src = CGF.Builder.CreateBitCast(Src, HalfTy);
11937
11938 // Perform the fp-extension.
11939 Value *Res = CGF.Builder.CreateFPExt(Src, DstTy, "cvtph2ps");
11940
11941 if (Ops.size() >= 3)
11942 Res = EmitX86Select(CGF, Ops[2], Res, Ops[1]);
11943 return Res;
11944}
11945
11946// Convert a BF16 to a float.
11947static Value *EmitX86CvtBF16ToFloatExpr(CodeGenFunction &CGF,
11948 const CallExpr *E,
11949 ArrayRef<Value *> Ops) {
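  // A bfloat16 is the high 16 bits of an IEEE single; zero-extend to i32,
  // shift into the high half, and bitcast the result to float.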
11950 llvm::Type *Int32Ty = CGF.Builder.getInt32Ty();
11951 Value *ZeroExt = CGF.Builder.CreateZExt(Ops[0], Int32Ty);
11952 Value *Shl = CGF.Builder.CreateShl(ZeroExt, 16);
11953 llvm::Type *ResultType = CGF.ConvertType(E->getType());
11954 Value *BitCast = CGF.Builder.CreateBitCast(Shl, ResultType);
11955 return BitCast;
11956}
11957
11958Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
11959
11960 llvm::Type *Int32Ty = Builder.getInt32Ty();
11961
11962 // Matching the struct layout from the compiler-rt/libgcc structure that is
11963 // filled in:
11964 // unsigned int __cpu_vendor;
11965 // unsigned int __cpu_type;
11966 // unsigned int __cpu_subtype;
11967 // unsigned int __cpu_features[1];
11968 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
11969 llvm::ArrayType::get(Int32Ty, 1));
11970
11971 // Grab the global __cpu_model.
11972 llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
11973 cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
11974
  // Calculate the index needed to access the correct field based on which
  // table the CPU string belongs to (vendor, type, or subtype). Also adjust
  // the expected value.
11977 unsigned Index;
11978 unsigned Value;
11979 std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr)
11980#define X86_VENDOR(ENUM, STRING) \
11981 .Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)})
11982#define X86_CPU_TYPE_ALIAS(ENUM, ALIAS) \
11983 .Case(ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
11984#define X86_CPU_TYPE(ENUM, STR) \
11985 .Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
11986#define X86_CPU_SUBTYPE(ENUM, STR) \
11987 .Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
11988#include "llvm/Support/X86TargetParser.def"
11989 .Default({0, 0});
11990 assert(Value != 0 && "Invalid CPUStr passed to CpuIs");
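  // For example (illustrative): "amd" is a vendor string and selects Index 0,
  // while a CPU type string such as "corei7" selects Index 1; the enum values
  // come from X86TargetParser.def.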
11991
11992 // Grab the appropriate field from __cpu_model.
11993 llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0),
11994 ConstantInt::get(Int32Ty, Index)};
11995 llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs);
11996 CpuValue = Builder.CreateAlignedLoad(CpuValue, CharUnits::fromQuantity(4));
11997
11998 // Check the value of the field against the requested value.
11999 return Builder.CreateICmpEQ(CpuValue,
12000 llvm::ConstantInt::get(Int32Ty, Value));
12001}
12002
12003Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {
12004 const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
12005 StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
12006 return EmitX86CpuSupports(FeatureStr);
12007}
12008
12009uint64_t
12010CodeGenFunction::GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs) {
  // Map each named processor feature to its bit in the features mask.
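  // For example (illustrative): {"avx2", "bmi"} sets the FEATURE_AVX2 and
  // FEATURE_BMI bits and leaves all others clear.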
12012 uint64_t FeaturesMask = 0;
12013 for (const StringRef &FeatureStr : FeatureStrs) {
12014 unsigned Feature =
12015 StringSwitch<unsigned>(FeatureStr)
12016#define X86_FEATURE_COMPAT(ENUM, STR) .Case(STR, llvm::X86::FEATURE_##ENUM)
12017#include "llvm/Support/X86TargetParser.def"
12018 ;
12019 FeaturesMask |= (1ULL << Feature);
12020 }
12021 return FeaturesMask;
12022}
12023
12024Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) {
12025 return EmitX86CpuSupports(GetX86CpuSupportsMask(FeatureStrs));
12026}
12027
12028llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) {
12029 uint32_t Features1 = Lo_32(FeaturesMask);
12030 uint32_t Features2 = Hi_32(FeaturesMask);
12031
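  // Each 32-bit half is tested with (Features & Mask) == Mask, so the builtin
  // returns true only if every requested feature bit is set.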
12032 Value *Result = Builder.getTrue();
12033
12034 if (Features1 != 0) {
12035 // Matching the struct layout from the compiler-rt/libgcc structure that is
12036 // filled in:
12037 // unsigned int __cpu_vendor;
12038 // unsigned int __cpu_type;
12039 // unsigned int __cpu_subtype;
12040 // unsigned int __cpu_features[1];
12041 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
12042 llvm::ArrayType::get(Int32Ty, 1));
12043
12044 // Grab the global __cpu_model.
12045 llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
12046 cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
12047
    // Grab the first (0th) element of the __cpu_features field (struct index
    // 3) from the global __cpu_model, using the struct type STy.
12050 Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(3),
12051 Builder.getInt32(0)};
12052 Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
12053 Value *Features =
12054 Builder.CreateAlignedLoad(CpuFeatures, CharUnits::fromQuantity(4));
12055
12056 // Check the value of the bit corresponding to the feature requested.
12057 Value *Mask = Builder.getInt32(Features1);
12058 Value *Bitset = Builder.CreateAnd(Features, Mask);
12059 Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
12060 Result = Builder.CreateAnd(Result, Cmp);
12061 }
12062
12063 if (Features2 != 0) {
12064 llvm::Constant *CpuFeatures2 = CGM.CreateRuntimeVariable(Int32Ty,
12065 "__cpu_features2");
12066 cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true);
12067
12068 Value *Features =
12069 Builder.CreateAlignedLoad(CpuFeatures2, CharUnits::fromQuantity(4));
12070
12071 // Check the value of the bit corresponding to the feature requested.
12072 Value *Mask = Builder.getInt32(Features2);
12073 Value *Bitset = Builder.CreateAnd(Features, Mask);
12074 Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
12075 Result = Builder.CreateAnd(Result, Cmp);
12076 }
12077
12078 return Result;
12079}
12080
12081Value *CodeGenFunction::EmitX86CpuInit() {
12082 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy,
12083 /*Variadic*/ false);
12084 llvm::FunctionCallee Func =
12085 CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init");
12086 cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true);
12087 cast<llvm::GlobalValue>(Func.getCallee())
12088 ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
12089 return Builder.CreateCall(Func);
12090}
12091
12092Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
12093 const CallExpr *E) {
12094 if (BuiltinID == X86::BI__builtin_cpu_is)
12095 return EmitX86CpuIs(E);
12096 if (BuiltinID == X86::BI__builtin_cpu_supports)
12097 return EmitX86CpuSupports(E);
12098 if (BuiltinID == X86::BI__builtin_cpu_init)
12099 return EmitX86CpuInit();
12100
12101 // Handle MSVC intrinsics before argument evaluation to prevent double
12102 // evaluation.
12103 if (Optional<MSVCIntrin> MsvcIntId = translateX86ToMsvcIntrin(BuiltinID))
12104 return EmitMSVCBuiltinExpr(*MsvcIntId, E);
12105
12106 SmallVector<Value*, 4> Ops;
12107 bool IsMaskFCmp = false;
12108
12109 // Find out if any arguments are required to be integer constant expressions.
12110 unsigned ICEArguments = 0;
12111 ASTContext::GetBuiltinTypeError Error;
12112 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
12113 assert(Error == ASTContext::GE_None && "Should not codegen an error");
12114
12115 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
12116 // If this is a normal argument, just emit it as a scalar.
12117 if ((ICEArguments & (1 << i)) == 0) {
12118 Ops.push_back(EmitScalarExpr(E->getArg(i)));
12119 continue;
12120 }
12121
12122 // If this is required to be a constant, constant fold it so that we know
12123 // that the generated intrinsic gets a ConstantInt.
12124 Ops.push_back(llvm::ConstantInt::get(
12125 getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext())));
12126 }
12127
  // These exist so that the builtin that takes an immediate can be bounds
  // checked by clang to avoid passing bad immediates to the backend. Since
  // AVX has a larger immediate than SSE, we would need separate builtins to
  // do the different bounds checking. Rather than create a clang-specific
  // SSE-only builtin, this implements eight separate builtins to match the
  // gcc implementation.
12134 auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) {
12135 Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
12136 llvm::Function *F = CGM.getIntrinsic(ID);
12137 return Builder.CreateCall(F, Ops);
12138 };
12139
12140 // For the vector forms of FP comparisons, translate the builtins directly to
12141 // IR.
12142 // TODO: The builtins could be removed if the SSE header files used vector
12143 // extension comparisons directly (vector ordered/unordered may need
12144 // additional support via __builtin_isnan()).
12145 auto getVectorFCmpIR = [this, &Ops](CmpInst::Predicate Pred,
12146 bool IsSignaling) {
12147 Value *Cmp;
12148 if (IsSignaling)
12149 Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
12150 else
12151 Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
12152 llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType());
12153 llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(FPVecTy);
12154 Value *Sext = Builder.CreateSExt(Cmp, IntVecTy);
12155 return Builder.CreateBitCast(Sext, FPVecTy);
12156 };
12157
12158 switch (BuiltinID) {
12159 default: return nullptr;
12160 case X86::BI_mm_prefetch: {
12161 Value *Address = Ops[0];
12162 ConstantInt *C = cast<ConstantInt>(Ops[1]);
12163 Value *RW = ConstantInt::get(Int32Ty, (C->getZExtValue() >> 2) & 0x1);
12164 Value *Locality = ConstantInt::get(Int32Ty, C->getZExtValue() & 0x3);
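    // For example (illustrative): _MM_HINT_T0 (3) gives RW = 0, Locality = 3,
    // while the ET variants set bit 2 of the hint and so give RW = 1.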
12165 Value *Data = ConstantInt::get(Int32Ty, 1);
12166 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
12167 return Builder.CreateCall(F, {Address, RW, Locality, Data});
12168 }
12169 case X86::BI_mm_clflush: {
12170 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush),
12171 Ops[0]);
12172 }
12173 case X86::BI_mm_lfence: {
12174 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence));
12175 }
12176 case X86::BI_mm_mfence: {
12177 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence));
12178 }
12179 case X86::BI_mm_sfence: {
12180 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence));
12181 }
12182 case X86::BI_mm_pause: {
12183 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause));
12184 }
12185 case X86::BI__rdtsc: {
12186 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc));
12187 }
12188 case X86::BI__builtin_ia32_rdtscp: {
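    // The rdtscp intrinsic returns the TSC as its first result and the
    // IA32_TSC_AUX value as its second; store the aux value through the
    // pointer operand and return the counter.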
12189 Value *Call = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtscp));
12190 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
12191 Ops[0]);
12192 return Builder.CreateExtractValue(Call, 0);
12193 }
12194 case X86::BI__builtin_ia32_lzcnt_u16:
12195 case X86::BI__builtin_ia32_lzcnt_u32:
12196 case X86::BI__builtin_ia32_lzcnt_u64: {
12197 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
12198 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
12199 }
12200 case X86::BI__builtin_ia32_tzcnt_u16:
12201 case X86::BI__builtin_ia32_tzcnt_u32:
12202 case X86::BI__builtin_ia32_tzcnt_u64: {
12203 Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
12204 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
12205 }
12206 case X86::BI__builtin_ia32_undef128:
12207 case X86::BI__builtin_ia32_undef256:
12208 case X86::BI__builtin_ia32_undef512:
12209 // The x86 definition of "undef" is not the same as the LLVM definition
12210 // (PR32176). We leave optimizing away an unnecessary zero constant to the
12211 // IR optimizer and backend.
12212 // TODO: If we had a "freeze" IR instruction to generate a fixed undef
12213 // value, we should use that here instead of a zero.
12214 return llvm::Constant::getNullValue(ConvertType(E->getType()));
12215 case X86::BI__builtin_ia32_vec_init_v8qi:
12216 case X86::BI__builtin_ia32_vec_init_v4hi:
12217 case X86::BI__builtin_ia32_vec_init_v2si:
12218 return Builder.CreateBitCast(BuildVector(Ops),
12219 llvm::Type::getX86_MMXTy(getLLVMContext()));
12220 case X86::BI__builtin_ia32_vec_ext_v2si:
12221 case X86::BI__builtin_ia32_vec_ext_v16qi:
12222 case X86::BI__builtin_ia32_vec_ext_v8hi:
12223 case X86::BI__builtin_ia32_vec_ext_v4si:
12224 case X86::BI__builtin_ia32_vec_ext_v4sf:
12225 case X86::BI__builtin_ia32_vec_ext_v2di:
12226 case X86::BI__builtin_ia32_vec_ext_v32qi:
12227 case X86::BI__builtin_ia32_vec_ext_v16hi:
12228 case X86::BI__builtin_ia32_vec_ext_v8si:
12229 case X86::BI__builtin_ia32_vec_ext_v4di: {
12230 unsigned NumElts =
12231 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12232 uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
12233 Index &= NumElts - 1;
12234 // These builtins exist so we can ensure the index is an ICE and in range.
12235 // Otherwise we could just do this in the header file.
12236 return Builder.CreateExtractElement(Ops[0], Index);
12237 }
12238 case X86::BI__builtin_ia32_vec_set_v16qi:
12239 case X86::BI__builtin_ia32_vec_set_v8hi:
12240 case X86::BI__builtin_ia32_vec_set_v4si:
12241 case X86::BI__builtin_ia32_vec_set_v2di:
12242 case X86::BI__builtin_ia32_vec_set_v32qi:
12243 case X86::BI__builtin_ia32_vec_set_v16hi:
12244 case X86::BI__builtin_ia32_vec_set_v8si:
12245 case X86::BI__builtin_ia32_vec_set_v4di: {
12246 unsigned NumElts =
12247 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12248 unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
12249 Index &= NumElts - 1;
12250 // These builtins exist so we can ensure the index is an ICE and in range.
12251 // Otherwise we could just do this in the header file.
12252 return Builder.CreateInsertElement(Ops[0], Ops[1], Index);
12253 }
12254 case X86::BI_mm_setcsr:
12255 case X86::BI__builtin_ia32_ldmxcsr: {
12256 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
12257 Builder.CreateStore(Ops[0], Tmp);
12258 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
12259 Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
12260 }
12261 case X86::BI_mm_getcsr:
12262 case X86::BI__builtin_ia32_stmxcsr: {
12263 Address Tmp = CreateMemTemp(E->getType());
12264 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
12265 Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
12266 return Builder.CreateLoad(Tmp, "stmxcsr");
12267 }
12268 case X86::BI__builtin_ia32_xsave:
12269 case X86::BI__builtin_ia32_xsave64:
12270 case X86::BI__builtin_ia32_xrstor:
12271 case X86::BI__builtin_ia32_xrstor64:
12272 case X86::BI__builtin_ia32_xsaveopt:
12273 case X86::BI__builtin_ia32_xsaveopt64:
12274 case X86::BI__builtin_ia32_xrstors:
12275 case X86::BI__builtin_ia32_xrstors64:
12276 case X86::BI__builtin_ia32_xsavec:
12277 case X86::BI__builtin_ia32_xsavec64:
12278 case X86::BI__builtin_ia32_xsaves:
12279 case X86::BI__builtin_ia32_xsaves64:
12280 case X86::BI__builtin_ia32_xsetbv:
12281 case X86::BI_xsetbv: {
12282 Intrinsic::ID ID;
12283#define INTRINSIC_X86_XSAVE_ID(NAME) \
12284 case X86::BI__builtin_ia32_##NAME: \
12285 ID = Intrinsic::x86_##NAME; \
12286 break
12287 switch (BuiltinID) {
12288 default: llvm_unreachable("Unsupported intrinsic!");
12289 INTRINSIC_X86_XSAVE_ID(xsave);
12290 INTRINSIC_X86_XSAVE_ID(xsave64);
12291 INTRINSIC_X86_XSAVE_ID(xrstor);
12292 INTRINSIC_X86_XSAVE_ID(xrstor64);
12293 INTRINSIC_X86_XSAVE_ID(xsaveopt);
12294 INTRINSIC_X86_XSAVE_ID(xsaveopt64);
12295 INTRINSIC_X86_XSAVE_ID(xrstors);
12296 INTRINSIC_X86_XSAVE_ID(xrstors64);
12297 INTRINSIC_X86_XSAVE_ID(xsavec);
12298 INTRINSIC_X86_XSAVE_ID(xsavec64);
12299 INTRINSIC_X86_XSAVE_ID(xsaves);
12300 INTRINSIC_X86_XSAVE_ID(xsaves64);
12301 INTRINSIC_X86_XSAVE_ID(xsetbv);
12302 case X86::BI_xsetbv:
12303 ID = Intrinsic::x86_xsetbv;
12304 break;
12305 }
12306#undef INTRINSIC_X86_XSAVE_ID
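    // These instructions take the 64-bit state mask split across EDX:EAX, so
    // break the mask operand into its high and low 32-bit halves.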
12307 Value *Mhi = Builder.CreateTrunc(
12308 Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty);
12309 Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty);
12310 Ops[1] = Mhi;
12311 Ops.push_back(Mlo);
12312 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
12313 }
12314 case X86::BI__builtin_ia32_xgetbv:
12315 case X86::BI_xgetbv:
12316 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_xgetbv), Ops);
12317 case X86::BI__builtin_ia32_storedqudi128_mask:
12318 case X86::BI__builtin_ia32_storedqusi128_mask:
12319 case X86::BI__builtin_ia32_storedquhi128_mask:
12320 case X86::BI__builtin_ia32_storedquqi128_mask:
12321 case X86::BI__builtin_ia32_storeupd128_mask:
12322 case X86::BI__builtin_ia32_storeups128_mask:
12323 case X86::BI__builtin_ia32_storedqudi256_mask:
12324 case X86::BI__builtin_ia32_storedqusi256_mask:
12325 case X86::BI__builtin_ia32_storedquhi256_mask:
12326 case X86::BI__builtin_ia32_storedquqi256_mask:
12327 case X86::BI__builtin_ia32_storeupd256_mask:
12328 case X86::BI__builtin_ia32_storeups256_mask:
12329 case X86::BI__builtin_ia32_storedqudi512_mask:
12330 case X86::BI__builtin_ia32_storedqusi512_mask:
12331 case X86::BI__builtin_ia32_storedquhi512_mask:
12332 case X86::BI__builtin_ia32_storedquqi512_mask:
12333 case X86::BI__builtin_ia32_storeupd512_mask:
12334 case X86::BI__builtin_ia32_storeups512_mask:
12335 return EmitX86MaskedStore(*this, Ops, Align(1));
12336
12337 case X86::BI__builtin_ia32_storess128_mask:
12338 case X86::BI__builtin_ia32_storesd128_mask:
12339 return EmitX86MaskedStore(*this, Ops, Align(1));
12340
12341 case X86::BI__builtin_ia32_vpopcntb_128:
12342 case X86::BI__builtin_ia32_vpopcntd_128:
12343 case X86::BI__builtin_ia32_vpopcntq_128:
12344 case X86::BI__builtin_ia32_vpopcntw_128:
12345 case X86::BI__builtin_ia32_vpopcntb_256:
12346 case X86::BI__builtin_ia32_vpopcntd_256:
12347 case X86::BI__builtin_ia32_vpopcntq_256:
12348 case X86::BI__builtin_ia32_vpopcntw_256:
12349 case X86::BI__builtin_ia32_vpopcntb_512:
12350 case X86::BI__builtin_ia32_vpopcntd_512:
12351 case X86::BI__builtin_ia32_vpopcntq_512:
12352 case X86::BI__builtin_ia32_vpopcntw_512: {
12353 llvm::Type *ResultType = ConvertType(E->getType());
12354 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
12355 return Builder.CreateCall(F, Ops);
12356 }
12357 case X86::BI__builtin_ia32_cvtmask2b128:
12358 case X86::BI__builtin_ia32_cvtmask2b256:
12359 case X86::BI__builtin_ia32_cvtmask2b512:
12360 case X86::BI__builtin_ia32_cvtmask2w128:
12361 case X86::BI__builtin_ia32_cvtmask2w256:
12362 case X86::BI__builtin_ia32_cvtmask2w512:
12363 case X86::BI__builtin_ia32_cvtmask2d128:
12364 case X86::BI__builtin_ia32_cvtmask2d256:
12365 case X86::BI__builtin_ia32_cvtmask2d512:
12366 case X86::BI__builtin_ia32_cvtmask2q128:
12367 case X86::BI__builtin_ia32_cvtmask2q256:
12368 case X86::BI__builtin_ia32_cvtmask2q512:
12369 return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType()));
12370
12371 case X86::BI__builtin_ia32_cvtb2mask128:
12372 case X86::BI__builtin_ia32_cvtb2mask256:
12373 case X86::BI__builtin_ia32_cvtb2mask512:
12374 case X86::BI__builtin_ia32_cvtw2mask128:
12375 case X86::BI__builtin_ia32_cvtw2mask256:
12376 case X86::BI__builtin_ia32_cvtw2mask512:
12377 case X86::BI__builtin_ia32_cvtd2mask128:
12378 case X86::BI__builtin_ia32_cvtd2mask256:
12379 case X86::BI__builtin_ia32_cvtd2mask512:
12380 case X86::BI__builtin_ia32_cvtq2mask128:
12381 case X86::BI__builtin_ia32_cvtq2mask256:
12382 case X86::BI__builtin_ia32_cvtq2mask512:
12383 return EmitX86ConvertToMask(*this, Ops[0]);
12384
12385 case X86::BI__builtin_ia32_cvtdq2ps512_mask:
12386 case X86::BI__builtin_ia32_cvtqq2ps512_mask:
12387 case X86::BI__builtin_ia32_cvtqq2pd512_mask:
12388 return EmitX86ConvertIntToFp(*this, Ops, /*IsSigned*/true);
12389 case X86::BI__builtin_ia32_cvtudq2ps512_mask:
12390 case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
12391 case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
12392 return EmitX86ConvertIntToFp(*this, Ops, /*IsSigned*/false);
12393
12394 case X86::BI__builtin_ia32_vfmaddss3:
12395 case X86::BI__builtin_ia32_vfmaddsd3:
12396 case X86::BI__builtin_ia32_vfmaddss3_mask:
12397 case X86::BI__builtin_ia32_vfmaddsd3_mask:
12398 return EmitScalarFMAExpr(*this, Ops, Ops[0]);
12399 case X86::BI__builtin_ia32_vfmaddss:
12400 case X86::BI__builtin_ia32_vfmaddsd:
12401 return EmitScalarFMAExpr(*this, Ops,
12402 Constant::getNullValue(Ops[0]->getType()));
12403 case X86::BI__builtin_ia32_vfmaddss3_maskz:
12404 case X86::BI__builtin_ia32_vfmaddsd3_maskz:
12405 return EmitScalarFMAExpr(*this, Ops, Ops[0], /*ZeroMask*/true);
12406 case X86::BI__builtin_ia32_vfmaddss3_mask3:
12407 case X86::BI__builtin_ia32_vfmaddsd3_mask3:
12408 return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2);
12409 case X86::BI__builtin_ia32_vfmsubss3_mask3:
12410 case X86::BI__builtin_ia32_vfmsubsd3_mask3:
12411 return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2,
12412 /*NegAcc*/true);
12413 case X86::BI__builtin_ia32_vfmaddps:
12414 case X86::BI__builtin_ia32_vfmaddpd:
12415 case X86::BI__builtin_ia32_vfmaddps256:
12416 case X86::BI__builtin_ia32_vfmaddpd256:
12417 case X86::BI__builtin_ia32_vfmaddps512_mask:
12418 case X86::BI__builtin_ia32_vfmaddps512_maskz:
12419 case X86::BI__builtin_ia32_vfmaddps512_mask3:
12420 case X86::BI__builtin_ia32_vfmsubps512_mask3:
12421 case X86::BI__builtin_ia32_vfmaddpd512_mask:
12422 case X86::BI__builtin_ia32_vfmaddpd512_maskz:
12423 case X86::BI__builtin_ia32_vfmaddpd512_mask3:
12424 case X86::BI__builtin_ia32_vfmsubpd512_mask3:
12425 return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/false);
12426 case X86::BI__builtin_ia32_vfmaddsubps512_mask:
12427 case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
12428 case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
12429 case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
12430 case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
12431 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
12432 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
12433 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
12434 return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/true);
12435
12436 case X86::BI__builtin_ia32_movdqa32store128_mask:
12437 case X86::BI__builtin_ia32_movdqa64store128_mask:
12438 case X86::BI__builtin_ia32_storeaps128_mask:
12439 case X86::BI__builtin_ia32_storeapd128_mask:
12440 case X86::BI__builtin_ia32_movdqa32store256_mask:
12441 case X86::BI__builtin_ia32_movdqa64store256_mask:
12442 case X86::BI__builtin_ia32_storeaps256_mask:
12443 case X86::BI__builtin_ia32_storeapd256_mask:
12444 case X86::BI__builtin_ia32_movdqa32store512_mask:
12445 case X86::BI__builtin_ia32_movdqa64store512_mask:
12446 case X86::BI__builtin_ia32_storeaps512_mask:
12447 case X86::BI__builtin_ia32_storeapd512_mask:
12448 return EmitX86MaskedStore(
12449 *this, Ops,
12450 getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
12451
12452 case X86::BI__builtin_ia32_loadups128_mask:
12453 case X86::BI__builtin_ia32_loadups256_mask:
12454 case X86::BI__builtin_ia32_loadups512_mask:
12455 case X86::BI__builtin_ia32_loadupd128_mask:
12456 case X86::BI__builtin_ia32_loadupd256_mask:
12457 case X86::BI__builtin_ia32_loadupd512_mask:
12458 case X86::BI__builtin_ia32_loaddquqi128_mask:
12459 case X86::BI__builtin_ia32_loaddquqi256_mask:
12460 case X86::BI__builtin_ia32_loaddquqi512_mask:
12461 case X86::BI__builtin_ia32_loaddquhi128_mask:
12462 case X86::BI__builtin_ia32_loaddquhi256_mask:
12463 case X86::BI__builtin_ia32_loaddquhi512_mask:
12464 case X86::BI__builtin_ia32_loaddqusi128_mask:
12465 case X86::BI__builtin_ia32_loaddqusi256_mask:
12466 case X86::BI__builtin_ia32_loaddqusi512_mask:
12467 case X86::BI__builtin_ia32_loaddqudi128_mask:
12468 case X86::BI__builtin_ia32_loaddqudi256_mask:
12469 case X86::BI__builtin_ia32_loaddqudi512_mask:
12470 return EmitX86MaskedLoad(*this, Ops, Align(1));
12471
12472 case X86::BI__builtin_ia32_loadss128_mask:
12473 case X86::BI__builtin_ia32_loadsd128_mask:
12474 return EmitX86MaskedLoad(*this, Ops, Align(1));
12475
12476 case X86::BI__builtin_ia32_loadaps128_mask:
12477 case X86::BI__builtin_ia32_loadaps256_mask:
12478 case X86::BI__builtin_ia32_loadaps512_mask:
12479 case X86::BI__builtin_ia32_loadapd128_mask:
12480 case X86::BI__builtin_ia32_loadapd256_mask:
12481 case X86::BI__builtin_ia32_loadapd512_mask:
12482 case X86::BI__builtin_ia32_movdqa32load128_mask:
12483 case X86::BI__builtin_ia32_movdqa32load256_mask:
12484 case X86::BI__builtin_ia32_movdqa32load512_mask:
12485 case X86::BI__builtin_ia32_movdqa64load128_mask:
12486 case X86::BI__builtin_ia32_movdqa64load256_mask:
12487 case X86::BI__builtin_ia32_movdqa64load512_mask:
12488 return EmitX86MaskedLoad(
12489 *this, Ops,
12490 getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
12491
12492 case X86::BI__builtin_ia32_expandloaddf128_mask:
12493 case X86::BI__builtin_ia32_expandloaddf256_mask:
12494 case X86::BI__builtin_ia32_expandloaddf512_mask:
12495 case X86::BI__builtin_ia32_expandloadsf128_mask:
12496 case X86::BI__builtin_ia32_expandloadsf256_mask:
12497 case X86::BI__builtin_ia32_expandloadsf512_mask:
12498 case X86::BI__builtin_ia32_expandloaddi128_mask:
12499 case X86::BI__builtin_ia32_expandloaddi256_mask:
12500 case X86::BI__builtin_ia32_expandloaddi512_mask:
12501 case X86::BI__builtin_ia32_expandloadsi128_mask:
12502 case X86::BI__builtin_ia32_expandloadsi256_mask:
12503 case X86::BI__builtin_ia32_expandloadsi512_mask:
12504 case X86::BI__builtin_ia32_expandloadhi128_mask:
12505 case X86::BI__builtin_ia32_expandloadhi256_mask:
12506 case X86::BI__builtin_ia32_expandloadhi512_mask:
12507 case X86::BI__builtin_ia32_expandloadqi128_mask:
12508 case X86::BI__builtin_ia32_expandloadqi256_mask:
12509 case X86::BI__builtin_ia32_expandloadqi512_mask:
12510 return EmitX86ExpandLoad(*this, Ops);
12511
12512 case X86::BI__builtin_ia32_compressstoredf128_mask:
12513 case X86::BI__builtin_ia32_compressstoredf256_mask:
12514 case X86::BI__builtin_ia32_compressstoredf512_mask:
12515 case X86::BI__builtin_ia32_compressstoresf128_mask:
12516 case X86::BI__builtin_ia32_compressstoresf256_mask:
12517 case X86::BI__builtin_ia32_compressstoresf512_mask:
12518 case X86::BI__builtin_ia32_compressstoredi128_mask:
12519 case X86::BI__builtin_ia32_compressstoredi256_mask:
12520 case X86::BI__builtin_ia32_compressstoredi512_mask:
12521 case X86::BI__builtin_ia32_compressstoresi128_mask:
12522 case X86::BI__builtin_ia32_compressstoresi256_mask:
12523 case X86::BI__builtin_ia32_compressstoresi512_mask:
12524 case X86::BI__builtin_ia32_compressstorehi128_mask:
12525 case X86::BI__builtin_ia32_compressstorehi256_mask:
12526 case X86::BI__builtin_ia32_compressstorehi512_mask:
12527 case X86::BI__builtin_ia32_compressstoreqi128_mask:
12528 case X86::BI__builtin_ia32_compressstoreqi256_mask:
12529 case X86::BI__builtin_ia32_compressstoreqi512_mask:
12530 return EmitX86CompressStore(*this, Ops);
12531
12532 case X86::BI__builtin_ia32_expanddf128_mask:
12533 case X86::BI__builtin_ia32_expanddf256_mask:
12534 case X86::BI__builtin_ia32_expanddf512_mask:
12535 case X86::BI__builtin_ia32_expandsf128_mask:
12536 case X86::BI__builtin_ia32_expandsf256_mask:
12537 case X86::BI__builtin_ia32_expandsf512_mask:
12538 case X86::BI__builtin_ia32_expanddi128_mask:
12539 case X86::BI__builtin_ia32_expanddi256_mask:
12540 case X86::BI__builtin_ia32_expanddi512_mask:
12541 case X86::BI__builtin_ia32_expandsi128_mask:
12542 case X86::BI__builtin_ia32_expandsi256_mask:
12543 case X86::BI__builtin_ia32_expandsi512_mask:
12544 case X86::BI__builtin_ia32_expandhi128_mask:
12545 case X86::BI__builtin_ia32_expandhi256_mask:
12546 case X86::BI__builtin_ia32_expandhi512_mask:
12547 case X86::BI__builtin_ia32_expandqi128_mask:
12548 case X86::BI__builtin_ia32_expandqi256_mask:
12549 case X86::BI__builtin_ia32_expandqi512_mask:
12550 return EmitX86CompressExpand(*this, Ops, /*IsCompress*/false);
12551
12552 case X86::BI__builtin_ia32_compressdf128_mask:
12553 case X86::BI__builtin_ia32_compressdf256_mask:
12554 case X86::BI__builtin_ia32_compressdf512_mask:
12555 case X86::BI__builtin_ia32_compresssf128_mask:
12556 case X86::BI__builtin_ia32_compresssf256_mask:
12557 case X86::BI__builtin_ia32_compresssf512_mask:
12558 case X86::BI__builtin_ia32_compressdi128_mask:
12559 case X86::BI__builtin_ia32_compressdi256_mask:
12560 case X86::BI__builtin_ia32_compressdi512_mask:
12561 case X86::BI__builtin_ia32_compresssi128_mask:
12562 case X86::BI__builtin_ia32_compresssi256_mask:
12563 case X86::BI__builtin_ia32_compresssi512_mask:
12564 case X86::BI__builtin_ia32_compresshi128_mask:
12565 case X86::BI__builtin_ia32_compresshi256_mask:
12566 case X86::BI__builtin_ia32_compresshi512_mask:
12567 case X86::BI__builtin_ia32_compressqi128_mask:
12568 case X86::BI__builtin_ia32_compressqi256_mask:
12569 case X86::BI__builtin_ia32_compressqi512_mask:
12570 return EmitX86CompressExpand(*this, Ops, /*IsCompress*/true);
12571
12572 case X86::BI__builtin_ia32_gather3div2df:
12573 case X86::BI__builtin_ia32_gather3div2di:
12574 case X86::BI__builtin_ia32_gather3div4df:
12575 case X86::BI__builtin_ia32_gather3div4di:
12576 case X86::BI__builtin_ia32_gather3div4sf:
12577 case X86::BI__builtin_ia32_gather3div4si:
12578 case X86::BI__builtin_ia32_gather3div8sf:
12579 case X86::BI__builtin_ia32_gather3div8si:
12580 case X86::BI__builtin_ia32_gather3siv2df:
12581 case X86::BI__builtin_ia32_gather3siv2di:
12582 case X86::BI__builtin_ia32_gather3siv4df:
12583 case X86::BI__builtin_ia32_gather3siv4di:
12584 case X86::BI__builtin_ia32_gather3siv4sf:
12585 case X86::BI__builtin_ia32_gather3siv4si:
12586 case X86::BI__builtin_ia32_gather3siv8sf:
12587 case X86::BI__builtin_ia32_gather3siv8si:
12588 case X86::BI__builtin_ia32_gathersiv8df:
12589 case X86::BI__builtin_ia32_gathersiv16sf:
12590 case X86::BI__builtin_ia32_gatherdiv8df:
12591 case X86::BI__builtin_ia32_gatherdiv16sf:
12592 case X86::BI__builtin_ia32_gathersiv8di:
12593 case X86::BI__builtin_ia32_gathersiv16si:
12594 case X86::BI__builtin_ia32_gatherdiv8di:
12595 case X86::BI__builtin_ia32_gatherdiv16si: {
12596 Intrinsic::ID IID;
12597 switch (BuiltinID) {
12598 default: llvm_unreachable("Unexpected builtin");
12599 case X86::BI__builtin_ia32_gather3div2df:
12600 IID = Intrinsic::x86_avx512_mask_gather3div2_df;
12601 break;
12602 case X86::BI__builtin_ia32_gather3div2di:
12603 IID = Intrinsic::x86_avx512_mask_gather3div2_di;
12604 break;
12605 case X86::BI__builtin_ia32_gather3div4df:
12606 IID = Intrinsic::x86_avx512_mask_gather3div4_df;
12607 break;
12608 case X86::BI__builtin_ia32_gather3div4di:
12609 IID = Intrinsic::x86_avx512_mask_gather3div4_di;
12610 break;
12611 case X86::BI__builtin_ia32_gather3div4sf:
12612 IID = Intrinsic::x86_avx512_mask_gather3div4_sf;
12613 break;
12614 case X86::BI__builtin_ia32_gather3div4si:
12615 IID = Intrinsic::x86_avx512_mask_gather3div4_si;
12616 break;
12617 case X86::BI__builtin_ia32_gather3div8sf:
12618 IID = Intrinsic::x86_avx512_mask_gather3div8_sf;
12619 break;
12620 case X86::BI__builtin_ia32_gather3div8si:
12621 IID = Intrinsic::x86_avx512_mask_gather3div8_si;
12622 break;
12623 case X86::BI__builtin_ia32_gather3siv2df:
12624 IID = Intrinsic::x86_avx512_mask_gather3siv2_df;
12625 break;
12626 case X86::BI__builtin_ia32_gather3siv2di:
12627 IID = Intrinsic::x86_avx512_mask_gather3siv2_di;
12628 break;
12629 case X86::BI__builtin_ia32_gather3siv4df:
12630 IID = Intrinsic::x86_avx512_mask_gather3siv4_df;
12631 break;
12632 case X86::BI__builtin_ia32_gather3siv4di:
12633 IID = Intrinsic::x86_avx512_mask_gather3siv4_di;
12634 break;
12635 case X86::BI__builtin_ia32_gather3siv4sf:
12636 IID = Intrinsic::x86_avx512_mask_gather3siv4_sf;
12637 break;
12638 case X86::BI__builtin_ia32_gather3siv4si:
12639 IID = Intrinsic::x86_avx512_mask_gather3siv4_si;
12640 break;
12641 case X86::BI__builtin_ia32_gather3siv8sf:
12642 IID = Intrinsic::x86_avx512_mask_gather3siv8_sf;
12643 break;
12644 case X86::BI__builtin_ia32_gather3siv8si:
12645 IID = Intrinsic::x86_avx512_mask_gather3siv8_si;
12646 break;
12647 case X86::BI__builtin_ia32_gathersiv8df:
12648 IID = Intrinsic::x86_avx512_mask_gather_dpd_512;
12649 break;
12650 case X86::BI__builtin_ia32_gathersiv16sf:
12651 IID = Intrinsic::x86_avx512_mask_gather_dps_512;
12652 break;
12653 case X86::BI__builtin_ia32_gatherdiv8df:
12654 IID = Intrinsic::x86_avx512_mask_gather_qpd_512;
12655 break;
12656 case X86::BI__builtin_ia32_gatherdiv16sf:
12657 IID = Intrinsic::x86_avx512_mask_gather_qps_512;
12658 break;
12659 case X86::BI__builtin_ia32_gathersiv8di:
12660 IID = Intrinsic::x86_avx512_mask_gather_dpq_512;
12661 break;
12662 case X86::BI__builtin_ia32_gathersiv16si:
12663 IID = Intrinsic::x86_avx512_mask_gather_dpi_512;
12664 break;
12665 case X86::BI__builtin_ia32_gatherdiv8di:
12666 IID = Intrinsic::x86_avx512_mask_gather_qpq_512;
12667 break;
12668 case X86::BI__builtin_ia32_gatherdiv16si:
12669 IID = Intrinsic::x86_avx512_mask_gather_qpi_512;
12670 break;
12671 }
12672
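    // The intrinsic's mask is as wide as the narrower of the data and index
    // vectors, so take the minimum of the two element counts.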
12673 unsigned MinElts = std::min(
12674 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(),
12675 cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements());
12676 Ops[3] = getMaskVecValue(*this, Ops[3], MinElts);
12677 Function *Intr = CGM.getIntrinsic(IID);
12678 return Builder.CreateCall(Intr, Ops);
12679 }
12680
12681 case X86::BI__builtin_ia32_scattersiv8df:
12682 case X86::BI__builtin_ia32_scattersiv16sf:
12683 case X86::BI__builtin_ia32_scatterdiv8df:
12684 case X86::BI__builtin_ia32_scatterdiv16sf:
12685 case X86::BI__builtin_ia32_scattersiv8di:
12686 case X86::BI__builtin_ia32_scattersiv16si:
12687 case X86::BI__builtin_ia32_scatterdiv8di:
12688 case X86::BI__builtin_ia32_scatterdiv16si:
12689 case X86::BI__builtin_ia32_scatterdiv2df:
12690 case X86::BI__builtin_ia32_scatterdiv2di:
12691 case X86::BI__builtin_ia32_scatterdiv4df:
12692 case X86::BI__builtin_ia32_scatterdiv4di:
12693 case X86::BI__builtin_ia32_scatterdiv4sf:
12694 case X86::BI__builtin_ia32_scatterdiv4si:
12695 case X86::BI__builtin_ia32_scatterdiv8sf:
12696 case X86::BI__builtin_ia32_scatterdiv8si:
12697 case X86::BI__builtin_ia32_scattersiv2df:
12698 case X86::BI__builtin_ia32_scattersiv2di:
12699 case X86::BI__builtin_ia32_scattersiv4df:
12700 case X86::BI__builtin_ia32_scattersiv4di:
12701 case X86::BI__builtin_ia32_scattersiv4sf:
12702 case X86::BI__builtin_ia32_scattersiv4si:
12703 case X86::BI__builtin_ia32_scattersiv8sf:
12704 case X86::BI__builtin_ia32_scattersiv8si: {
12705 Intrinsic::ID IID;
12706 switch (BuiltinID) {
12707 default: llvm_unreachable("Unexpected builtin");
12708 case X86::BI__builtin_ia32_scattersiv8df:
12709 IID = Intrinsic::x86_avx512_mask_scatter_dpd_512;
12710 break;
12711 case X86::BI__builtin_ia32_scattersiv16sf:
12712 IID = Intrinsic::x86_avx512_mask_scatter_dps_512;
12713 break;
12714 case X86::BI__builtin_ia32_scatterdiv8df:
12715 IID = Intrinsic::x86_avx512_mask_scatter_qpd_512;
12716 break;
12717 case X86::BI__builtin_ia32_scatterdiv16sf:
12718 IID = Intrinsic::x86_avx512_mask_scatter_qps_512;
12719 break;
12720 case X86::BI__builtin_ia32_scattersiv8di:
12721 IID = Intrinsic::x86_avx512_mask_scatter_dpq_512;
12722 break;
12723 case X86::BI__builtin_ia32_scattersiv16si:
12724 IID = Intrinsic::x86_avx512_mask_scatter_dpi_512;
12725 break;
12726 case X86::BI__builtin_ia32_scatterdiv8di:
12727 IID = Intrinsic::x86_avx512_mask_scatter_qpq_512;
12728 break;
12729 case X86::BI__builtin_ia32_scatterdiv16si:
12730 IID = Intrinsic::x86_avx512_mask_scatter_qpi_512;
12731 break;
12732 case X86::BI__builtin_ia32_scatterdiv2df:
12733 IID = Intrinsic::x86_avx512_mask_scatterdiv2_df;
12734 break;
12735 case X86::BI__builtin_ia32_scatterdiv2di:
12736 IID = Intrinsic::x86_avx512_mask_scatterdiv2_di;
12737 break;
12738 case X86::BI__builtin_ia32_scatterdiv4df:
12739 IID = Intrinsic::x86_avx512_mask_scatterdiv4_df;
12740 break;
12741 case X86::BI__builtin_ia32_scatterdiv4di:
12742 IID = Intrinsic::x86_avx512_mask_scatterdiv4_di;
12743 break;
12744 case X86::BI__builtin_ia32_scatterdiv4sf:
12745 IID = Intrinsic::x86_avx512_mask_scatterdiv4_sf;
12746 break;
12747 case X86::BI__builtin_ia32_scatterdiv4si:
12748 IID = Intrinsic::x86_avx512_mask_scatterdiv4_si;
12749 break;
12750 case X86::BI__builtin_ia32_scatterdiv8sf:
12751 IID = Intrinsic::x86_avx512_mask_scatterdiv8_sf;
12752 break;
12753 case X86::BI__builtin_ia32_scatterdiv8si:
12754 IID = Intrinsic::x86_avx512_mask_scatterdiv8_si;
12755 break;
12756 case X86::BI__builtin_ia32_scattersiv2df:
12757 IID = Intrinsic::x86_avx512_mask_scattersiv2_df;
12758 break;
12759 case X86::BI__builtin_ia32_scattersiv2di:
12760 IID = Intrinsic::x86_avx512_mask_scattersiv2_di;
12761 break;
12762 case X86::BI__builtin_ia32_scattersiv4df:
12763 IID = Intrinsic::x86_avx512_mask_scattersiv4_df;
12764 break;
12765 case X86::BI__builtin_ia32_scattersiv4di:
12766 IID = Intrinsic::x86_avx512_mask_scattersiv4_di;
12767 break;
12768 case X86::BI__builtin_ia32_scattersiv4sf:
12769 IID = Intrinsic::x86_avx512_mask_scattersiv4_sf;
12770 break;
12771 case X86::BI__builtin_ia32_scattersiv4si:
12772 IID = Intrinsic::x86_avx512_mask_scattersiv4_si;
12773 break;
12774 case X86::BI__builtin_ia32_scattersiv8sf:
12775 IID = Intrinsic::x86_avx512_mask_scattersiv8_sf;
12776 break;
12777 case X86::BI__builtin_ia32_scattersiv8si:
12778 IID = Intrinsic::x86_avx512_mask_scattersiv8_si;
12779 break;
12780 }
12781
12782 unsigned MinElts = std::min(
12783 cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements(),
12784 cast<llvm::FixedVectorType>(Ops[3]->getType())->getNumElements());
12785 Ops[1] = getMaskVecValue(*this, Ops[1], MinElts);
12786 Function *Intr = CGM.getIntrinsic(IID);
12787 return Builder.CreateCall(Intr, Ops);
12788 }
12789
12790 case X86::BI__builtin_ia32_vextractf128_pd256:
12791 case X86::BI__builtin_ia32_vextractf128_ps256:
12792 case X86::BI__builtin_ia32_vextractf128_si256:
12793 case X86::BI__builtin_ia32_extract128i256:
12794 case X86::BI__builtin_ia32_extractf64x4_mask:
12795 case X86::BI__builtin_ia32_extractf32x4_mask:
12796 case X86::BI__builtin_ia32_extracti64x4_mask:
12797 case X86::BI__builtin_ia32_extracti32x4_mask:
12798 case X86::BI__builtin_ia32_extractf32x8_mask:
12799 case X86::BI__builtin_ia32_extracti32x8_mask:
12800 case X86::BI__builtin_ia32_extractf32x4_256_mask:
12801 case X86::BI__builtin_ia32_extracti32x4_256_mask:
12802 case X86::BI__builtin_ia32_extractf64x2_256_mask:
12803 case X86::BI__builtin_ia32_extracti64x2_256_mask:
12804 case X86::BI__builtin_ia32_extractf64x2_512_mask:
12805 case X86::BI__builtin_ia32_extracti64x2_512_mask: {
12806 auto *DstTy = cast<llvm::FixedVectorType>(ConvertType(E->getType()));
12807 unsigned NumElts = DstTy->getNumElements();
12808 unsigned SrcNumElts =
12809 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12810 unsigned SubVectors = SrcNumElts / NumElts;
12811 unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
12812 assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
12813 Index &= SubVectors - 1; // Remove any extra bits.
12814 Index *= NumElts;
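    // For example (illustrative): extracting subvector 1 of a 512-bit float
    // vector as <4 x float> gives NumElts = 4 and Index = 4, so Indices below
    // is {4, 5, 6, 7}.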
12815
12816 int Indices[16];
12817 for (unsigned i = 0; i != NumElts; ++i)
12818 Indices[i] = i + Index;
12819
12820 Value *Res = Builder.CreateShuffleVector(Ops[0],
12821 makeArrayRef(Indices, NumElts),
12822 "extract");
12823
12824 if (Ops.size() == 4)
12825 Res = EmitX86Select(*this, Ops[3], Res, Ops[2]);
12826
12827 return Res;
12828 }
12829 case X86::BI__builtin_ia32_vinsertf128_pd256:
12830 case X86::BI__builtin_ia32_vinsertf128_ps256:
12831 case X86::BI__builtin_ia32_vinsertf128_si256:
12832 case X86::BI__builtin_ia32_insert128i256:
12833 case X86::BI__builtin_ia32_insertf64x4:
12834 case X86::BI__builtin_ia32_insertf32x4:
12835 case X86::BI__builtin_ia32_inserti64x4:
12836 case X86::BI__builtin_ia32_inserti32x4:
12837 case X86::BI__builtin_ia32_insertf32x8:
12838 case X86::BI__builtin_ia32_inserti32x8:
12839 case X86::BI__builtin_ia32_insertf32x4_256:
12840 case X86::BI__builtin_ia32_inserti32x4_256:
12841 case X86::BI__builtin_ia32_insertf64x2_256:
12842 case X86::BI__builtin_ia32_inserti64x2_256:
12843 case X86::BI__builtin_ia32_insertf64x2_512:
12844 case X86::BI__builtin_ia32_inserti64x2_512: {
12845 unsigned DstNumElts =
12846 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12847 unsigned SrcNumElts =
12848 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements();
12849 unsigned SubVectors = DstNumElts / SrcNumElts;
12850 unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
12851 assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
12852 Index &= SubVectors - 1; // Remove any extra bits.
12853 Index *= SrcNumElts;
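    // For example (illustrative): inserting a <4 x float> at immediate 1 of an
    // <8 x float> widens Ops[1] first; the second shuffle then selects
    // {0, 1, 2, 3, 8, 9, 10, 11} from the concatenated operands.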
12854
12855 int Indices[16];
12856 for (unsigned i = 0; i != DstNumElts; ++i)
12857 Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i;
12858
12859 Value *Op1 = Builder.CreateShuffleVector(Ops[1],
12860 makeArrayRef(Indices, DstNumElts),
12861 "widen");
12862
12863 for (unsigned i = 0; i != DstNumElts; ++i) {
12864 if (i >= Index && i < (Index + SrcNumElts))
12865 Indices[i] = (i - Index) + DstNumElts;
12866 else
12867 Indices[i] = i;
12868 }
12869
12870 return Builder.CreateShuffleVector(Ops[0], Op1,
12871 makeArrayRef(Indices, DstNumElts),
12872 "insert");
12873 }
12874 case X86::BI__builtin_ia32_pmovqd512_mask:
12875 case X86::BI__builtin_ia32_pmovwb512_mask: {
12876 Value *Res = Builder.CreateTrunc(Ops[0], Ops[1]->getType());
12877 return EmitX86Select(*this, Ops[2], Res, Ops[1]);
12878 }
12879 case X86::BI__builtin_ia32_pmovdb512_mask:
12880 case X86::BI__builtin_ia32_pmovdw512_mask:
12881 case X86::BI__builtin_ia32_pmovqw512_mask: {
12882 if (const auto *C = dyn_cast<Constant>(Ops[2]))
12883 if (C->isAllOnesValue())
12884 return Builder.CreateTrunc(Ops[0], Ops[1]->getType());
12885
12886 Intrinsic::ID IID;
12887 switch (BuiltinID) {
12888 default: llvm_unreachable("Unsupported intrinsic!");
12889 case X86::BI__builtin_ia32_pmovdb512_mask:
12890 IID = Intrinsic::x86_avx512_mask_pmov_db_512;
12891 break;
12892 case X86::BI__builtin_ia32_pmovdw512_mask:
12893 IID = Intrinsic::x86_avx512_mask_pmov_dw_512;
12894 break;
12895 case X86::BI__builtin_ia32_pmovqw512_mask:
12896 IID = Intrinsic::x86_avx512_mask_pmov_qw_512;
12897 break;
12898 }
12899
12900 Function *Intr = CGM.getIntrinsic(IID);
12901 return Builder.CreateCall(Intr, Ops);
12902 }
12903 case X86::BI__builtin_ia32_pblendw128:
12904 case X86::BI__builtin_ia32_blendpd:
12905 case X86::BI__builtin_ia32_blendps:
12906 case X86::BI__builtin_ia32_blendpd256:
12907 case X86::BI__builtin_ia32_blendps256:
12908 case X86::BI__builtin_ia32_pblendw256:
12909 case X86::BI__builtin_ia32_pblendd128:
12910 case X86::BI__builtin_ia32_pblendd256: {
12911 unsigned NumElts =
12912 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12913 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
12914
12915 int Indices[16];
    // If there are more than 8 elements, the 8-bit immediate is reused for
    // each group of 8 elements, so make sure we handle that.
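    // For example (illustrative): blendps with Imm = 0b0101 on <4 x float>
    // yields indices {4, 1, 6, 3}, taking elements 0 and 2 from Ops[1].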
12918 for (unsigned i = 0; i != NumElts; ++i)
12919 Indices[i] = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i;
12920
12921 return Builder.CreateShuffleVector(Ops[0], Ops[1],
12922 makeArrayRef(Indices, NumElts),
12923 "blend");
12924 }
12925 case X86::BI__builtin_ia32_pshuflw:
12926 case X86::BI__builtin_ia32_pshuflw256:
12927 case X86::BI__builtin_ia32_pshuflw512: {
12928 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
12929 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
12930 unsigned NumElts = Ty->getNumElements();
12931
    // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
12933 Imm = (Imm & 0xff) * 0x01010101;
12934
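    // For example (illustrative): Imm = 0x1B reverses the low four words of
    // each 8-element group, giving indices {3, 2, 1, 0, 4, 5, 6, 7}.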
12935 int Indices[32];
12936 for (unsigned l = 0; l != NumElts; l += 8) {
12937 for (unsigned i = 0; i != 4; ++i) {
12938 Indices[l + i] = l + (Imm & 3);
12939 Imm >>= 2;
12940 }
12941 for (unsigned i = 4; i != 8; ++i)
12942 Indices[l + i] = l + i;
12943 }
12944
12945 return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
12946 "pshuflw");
12947 }
12948 case X86::BI__builtin_ia32_pshufhw:
12949 case X86::BI__builtin_ia32_pshufhw256:
12950 case X86::BI__builtin_ia32_pshufhw512: {
12951 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
12952 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
12953 unsigned NumElts = Ty->getNumElements();
12954
    // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
12956 Imm = (Imm & 0xff) * 0x01010101;
12957
12958 int Indices[32];
12959 for (unsigned l = 0; l != NumElts; l += 8) {
12960 for (unsigned i = 0; i != 4; ++i)
12961 Indices[l + i] = l + i;
12962 for (unsigned i = 4; i != 8; ++i) {
12963 Indices[l + i] = l + 4 + (Imm & 3);
12964 Imm >>= 2;
12965 }
12966 }
12967
12968 return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
12969 "pshufhw");
12970 }
12971 case X86::BI__builtin_ia32_pshufd:
12972 case X86::BI__builtin_ia32_pshufd256:
12973 case X86::BI__builtin_ia32_pshufd512:
12974 case X86::BI__builtin_ia32_vpermilpd:
12975 case X86::BI__builtin_ia32_vpermilps:
12976 case X86::BI__builtin_ia32_vpermilpd256:
12977 case X86::BI__builtin_ia32_vpermilps256:
12978 case X86::BI__builtin_ia32_vpermilpd512:
12979 case X86::BI__builtin_ia32_vpermilps512: {
12980 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
12981 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
12982 unsigned NumElts = Ty->getNumElements();
12983 unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
12984 unsigned NumLaneElts = NumElts / NumLanes;
12985
    // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
12987 Imm = (Imm & 0xff) * 0x01010101;
12988
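    // For example (illustrative): pshufd with Imm = 0x4E swaps the two halves
    // of each 128-bit lane, giving indices {2, 3, 0, 1} per lane.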
12989 int Indices[16];
12990 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
12991 for (unsigned i = 0; i != NumLaneElts; ++i) {
12992 Indices[i + l] = (Imm % NumLaneElts) + l;
12993 Imm /= NumLaneElts;
12994 }
12995 }
12996
12997 return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
12998 "permil");
12999 }
13000 case X86::BI__builtin_ia32_shufpd:
13001 case X86::BI__builtin_ia32_shufpd256:
13002 case X86::BI__builtin_ia32_shufpd512:
13003 case X86::BI__builtin_ia32_shufps:
13004 case X86::BI__builtin_ia32_shufps256:
13005 case X86::BI__builtin_ia32_shufps512: {
13006 uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
13007 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
13008 unsigned NumElts = Ty->getNumElements();
13009 unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
13010 unsigned NumLaneElts = NumElts / NumLanes;
13011
    // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
13013 Imm = (Imm & 0xff) * 0x01010101;
13014
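    // For example (illustrative): shufps with Imm = 0x44 on <4 x float>
    // selects {0, 1, 4, 5}, i.e. the low halves of Ops[0] and Ops[1].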
13015 int Indices[16];
13016 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
13017 for (unsigned i = 0; i != NumLaneElts; ++i) {
13018 unsigned Index = Imm % NumLaneElts;
13019 Imm /= NumLaneElts;
13020 if (i >= (NumLaneElts / 2))
13021 Index += NumElts;
13022 Indices[l + i] = l + Index;
13023 }
13024 }
13025
13026 return Builder.CreateShuffleVector(Ops[0], Ops[1],
13027 makeArrayRef(Indices, NumElts),
13028 "shufp");
13029 }
13030 case X86::BI__builtin_ia32_permdi256:
13031 case X86::BI__builtin_ia32_permdf256:
13032 case X86::BI__builtin_ia32_permdi512:
13033 case X86::BI__builtin_ia32_permdf512: {
13034 unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
13035 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
13036 unsigned NumElts = Ty->getNumElements();
13037
13038 // These intrinsics operate on 256-bit lanes of four 64-bit elements.
13039 int Indices[8];
13040 for (unsigned l = 0; l != NumElts; l += 4)
13041 for (unsigned i = 0; i != 4; ++i)
13042 Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3);
13043
13044 return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
13045 "perm");
13046 }
13047 case X86::BI__builtin_ia32_palignr128:
13048 case X86::BI__builtin_ia32_palignr256:
13049 case X86::BI__builtin_ia32_palignr512: {
13050 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
13051
13052 unsigned NumElts =
13053 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13054 assert(NumElts % 16 == 0);
13055
13056 // If palignr is shifting the pair of vectors more than the size of two
13057 // lanes, emit zero.
13058 if (ShiftVal >= 32)
13059 return llvm::Constant::getNullValue(ConvertType(E->getType()));
13060
13061 // If palignr is shifting the pair of input vectors more than one lane,
13062 // but less than two lanes, convert to shifting in zeroes.
13063 if (ShiftVal > 16) {
13064 ShiftVal -= 16;
13065 Ops[1] = Ops[0];
13066 Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
13067 }
13068
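    // For example (illustrative): ShiftVal = 4 on a 128-bit palignr gives
    // indices {4..19}: bytes 4..15 of Ops[1] followed by bytes 0..3 of
    // Ops[0], since the shuffle below takes Ops[1] as its first operand.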
13069 int Indices[64];
    // 256/512-bit palignr operates on 128-bit lanes, so we need to handle that.
13071 for (unsigned l = 0; l != NumElts; l += 16) {
13072 for (unsigned i = 0; i != 16; ++i) {
13073 unsigned Idx = ShiftVal + i;
13074 if (Idx >= 16)
13075 Idx += NumElts - 16; // End of lane, switch operand.
13076 Indices[l + i] = Idx + l;
13077 }
13078 }
13079
13080 return Builder.CreateShuffleVector(Ops[1], Ops[0],
13081 makeArrayRef(Indices, NumElts),
13082 "palignr");
13083 }
13084 case X86::BI__builtin_ia32_alignd128:
13085 case X86::BI__builtin_ia32_alignd256:
13086 case X86::BI__builtin_ia32_alignd512:
13087 case X86::BI__builtin_ia32_alignq128:
13088 case X86::BI__builtin_ia32_alignq256:
13089 case X86::BI__builtin_ia32_alignq512: {
13090 unsigned NumElts =
13091 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13092 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
13093
    // Mask the shift amount to the width of two vectors.
13095 ShiftVal &= (2 * NumElts) - 1;
13096
13097 int Indices[16];
13098 for (unsigned i = 0; i != NumElts; ++i)
13099 Indices[i] = i + ShiftVal;
13100
13101 return Builder.CreateShuffleVector(Ops[1], Ops[0],
13102 makeArrayRef(Indices, NumElts),
13103 "valign");
13104 }
13105 case X86::BI__builtin_ia32_shuf_f32x4_256:
13106 case X86::BI__builtin_ia32_shuf_f64x2_256:
13107 case X86::BI__builtin_ia32_shuf_i32x4_256:
13108 case X86::BI__builtin_ia32_shuf_i64x2_256:
13109 case X86::BI__builtin_ia32_shuf_f32x4:
13110 case X86::BI__builtin_ia32_shuf_f64x2:
13111 case X86::BI__builtin_ia32_shuf_i32x4:
13112 case X86::BI__builtin_ia32_shuf_i64x2: {
13113 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
13114 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
13115 unsigned NumElts = Ty->getNumElements();
13116 unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
13117 unsigned NumLaneElts = NumElts / NumLanes;
13118
13119 int Indices[16];
13120 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
13121 unsigned Index = (Imm % NumLanes) * NumLaneElts;
13122 Imm /= NumLanes; // Discard the bits we just used.
13123 if (l >= (NumElts / 2))
13124 Index += NumElts; // Switch to other source.
13125 for (unsigned i = 0; i != NumLaneElts; ++i) {
13126 Indices[l + i] = Index + i;
13127 }
13128 }
13129
13130 return Builder.CreateShuffleVector(Ops[0], Ops[1],
13131 makeArrayRef(Indices, NumElts),
13132 "shuf");
13133 }
13134
13135 case X86::BI__builtin_ia32_vperm2f128_pd256:
13136 case X86::BI__builtin_ia32_vperm2f128_ps256:
13137 case X86::BI__builtin_ia32_vperm2f128_si256:
13138 case X86::BI__builtin_ia32_permti256: {
13139 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
13140 unsigned NumElts =
13141 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13142
13143 // This takes a very simple approach since there are two lanes and a
13144 // shuffle can have 2 inputs. So we reserve the first input for the first
13145 // lane and the second input for the second lane. This may result in
13146 // duplicate sources, but this can be dealt with in the backend.
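    // For example (illustrative): Imm = 0x31 selects the high half of Ops[0]
    // for the low result lane and the high half of Ops[1] for the high lane.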
13147
13148 Value *OutOps[2];
13149 int Indices[8];
13150 for (unsigned l = 0; l != 2; ++l) {
13151 // Determine the source for this lane.
13152 if (Imm & (1 << ((l * 4) + 3)))
13153 OutOps[l] = llvm::ConstantAggregateZero::get(Ops[0]->getType());
13154 else if (Imm & (1 << ((l * 4) + 1)))
13155 OutOps[l] = Ops[1];
13156 else
13157 OutOps[l] = Ops[0];
13158
13159 for (unsigned i = 0; i != NumElts/2; ++i) {
      // Start with the ith element of the source for this lane.
13161 unsigned Idx = (l * NumElts) + i;
13162 // If bit 0 of the immediate half is set, switch to the high half of
13163 // the source.
13164 if (Imm & (1 << (l * 4)))
13165 Idx += NumElts/2;
13166 Indices[(l * (NumElts/2)) + i] = Idx;
13167 }
13168 }
13169
13170 return Builder.CreateShuffleVector(OutOps[0], OutOps[1],
13171 makeArrayRef(Indices, NumElts),
13172 "vperm");
13173 }
13174
13175 case X86::BI__builtin_ia32_pslldqi128_byteshift:
13176 case X86::BI__builtin_ia32_pslldqi256_byteshift:
13177 case X86::BI__builtin_ia32_pslldqi512_byteshift: {
13178 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
13179 auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
    // The builtin's type is vXi64, so multiply by 8 to get the byte count.
13181 unsigned NumElts = ResultType->getNumElements() * 8;
13182
13183 // If pslldq is shifting the vector more than 15 bytes, emit zero.
13184 if (ShiftVal >= 16)
13185 return llvm::Constant::getNullValue(ResultType);
13186
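    // For example (illustrative): ShiftVal = 4 on a 128-bit pslldq gives
    // indices {12..27}: four zero bytes followed by source bytes 0..11.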
13187 int Indices[64];
    // 256/512-bit pslldq operates on 128-bit lanes, so we need to handle that.
13189 for (unsigned l = 0; l != NumElts; l += 16) {
13190 for (unsigned i = 0; i != 16; ++i) {
13191 unsigned Idx = NumElts + i - ShiftVal;
13192 if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand.
13193 Indices[l + i] = Idx + l;
13194 }
13195 }
13196
13197 auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
13198 Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
13199 Value *Zero = llvm::Constant::getNullValue(VecTy);
13200 Value *SV = Builder.CreateShuffleVector(Zero, Cast,
13201 makeArrayRef(Indices, NumElts),
13202 "pslldq");
13203 return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast");
13204 }
13205 case X86::BI__builtin_ia32_psrldqi128_byteshift:
13206 case X86::BI__builtin_ia32_psrldqi256_byteshift:
13207 case X86::BI__builtin_ia32_psrldqi512_byteshift: {
13208 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
13209 auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
    // The builtin's type is vXi64, so multiply by 8 to get the byte count.
13211 unsigned NumElts = ResultType->getNumElements() * 8;
13212
13213 // If psrldq is shifting the vector more than 15 bytes, emit zero.
13214 if (ShiftVal >= 16)
13215 return llvm::Constant::getNullValue(ResultType);
13216
13217 int Indices[64];
    // 256/512-bit psrldq operates on 128-bit lanes, so we need to handle that.
13219 for (unsigned l = 0; l != NumElts; l += 16) {
13220 for (unsigned i = 0; i != 16; ++i) {
13221 unsigned Idx = i + ShiftVal;
13222 if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand.
13223 Indices[l + i] = Idx + l;
13224 }
13225 }
13226
13227 auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
13228 Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
13229 Value *Zero = llvm::Constant::getNullValue(VecTy);
13230 Value *SV = Builder.CreateShuffleVector(Cast, Zero,
13231 makeArrayRef(Indices, NumElts),
13232 "psrldq");
13233 return Builder.CreateBitCast(SV, ResultType, "cast");
13234 }
13235 case X86::BI__builtin_ia32_kshiftliqi:
13236 case X86::BI__builtin_ia32_kshiftlihi:
13237 case X86::BI__builtin_ia32_kshiftlisi:
13238 case X86::BI__builtin_ia32_kshiftlidi: {
13239 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
13240 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13241
13242 if (ShiftVal >= NumElts)
13243 return llvm::Constant::getNullValue(Ops[0]->getType());
13244
13245 Value *In = getMaskVecValue(*this, Ops[0], NumElts);
13246
13247 int Indices[64];
13248 for (unsigned i = 0; i != NumElts; ++i)
13249 Indices[i] = NumElts + i - ShiftVal;
13250
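    // For example (illustrative): kshiftliqi with ShiftVal = 2 gives indices
    // {6..13}: two zero bits followed by bits 0..5 of the input mask.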
13251 Value *Zero = llvm::Constant::getNullValue(In->getType());
13252 Value *SV = Builder.CreateShuffleVector(Zero, In,
13253 makeArrayRef(Indices, NumElts),
13254 "kshiftl");
13255 return Builder.CreateBitCast(SV, Ops[0]->getType());
13256 }
13257 case X86::BI__builtin_ia32_kshiftriqi:
13258 case X86::BI__builtin_ia32_kshiftrihi:
13259 case X86::BI__builtin_ia32_kshiftrisi:
13260 case X86::BI__builtin_ia32_kshiftridi: {
13261 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
13262 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13263
13264 if (ShiftVal >= NumElts)
13265 return llvm::Constant::getNullValue(Ops[0]->getType());
13266
13267 Value *In = getMaskVecValue(*this, Ops[0], NumElts);
13268
13269 int Indices[64];
13270 for (unsigned i = 0; i != NumElts; ++i)
13271 Indices[i] = i + ShiftVal;
13272
13273 Value *Zero = llvm::Constant::getNullValue(In->getType());
13274 Value *SV = Builder.CreateShuffleVector(In, Zero,
13275 makeArrayRef(Indices, NumElts),
13276 "kshiftr");
13277 return Builder.CreateBitCast(SV, Ops[0]->getType());
13278 }
13279 case X86::BI__builtin_ia32_movnti:
13280 case X86::BI__builtin_ia32_movnti64:
13281 case X86::BI__builtin_ia32_movntsd:
13282 case X86::BI__builtin_ia32_movntss: {
13283 llvm::MDNode *Node = llvm::MDNode::get(
13284 getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
13285
13286 Value *Ptr = Ops[0];
13287 Value *Src = Ops[1];
13288
13289    // Extract the 0th element of the source vector.
13290 if (BuiltinID == X86::BI__builtin_ia32_movntsd ||
13291 BuiltinID == X86::BI__builtin_ia32_movntss)
13292 Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract");
13293
13294 // Convert the type of the pointer to a pointer to the stored type.
13295 Value *BC = Builder.CreateBitCast(
13296 Ptr, llvm::PointerType::getUnqual(Src->getType()), "cast");
13297
13298 // Unaligned nontemporal store of the scalar value.
13299 StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC);
13300 SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
13301 SI->setAlignment(llvm::Align(1));
13302 return SI;
13303 }
13304  // Rotate is a special case of funnel shift - the first two args are the same.
13305 case X86::BI__builtin_ia32_vprotb:
13306 case X86::BI__builtin_ia32_vprotw:
13307 case X86::BI__builtin_ia32_vprotd:
13308 case X86::BI__builtin_ia32_vprotq:
13309 case X86::BI__builtin_ia32_vprotbi:
13310 case X86::BI__builtin_ia32_vprotwi:
13311 case X86::BI__builtin_ia32_vprotdi:
13312 case X86::BI__builtin_ia32_vprotqi:
13313 case X86::BI__builtin_ia32_prold128:
13314 case X86::BI__builtin_ia32_prold256:
13315 case X86::BI__builtin_ia32_prold512:
13316 case X86::BI__builtin_ia32_prolq128:
13317 case X86::BI__builtin_ia32_prolq256:
13318 case X86::BI__builtin_ia32_prolq512:
13319 case X86::BI__builtin_ia32_prolvd128:
13320 case X86::BI__builtin_ia32_prolvd256:
13321 case X86::BI__builtin_ia32_prolvd512:
13322 case X86::BI__builtin_ia32_prolvq128:
13323 case X86::BI__builtin_ia32_prolvq256:
13324 case X86::BI__builtin_ia32_prolvq512:
13325 return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], false);
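  // Right rotates use the same funnel-shift lowering, with the direction flag
  // set so that fshr is emitted instead of fshl.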
13326 case X86::BI__builtin_ia32_prord128:
13327 case X86::BI__builtin_ia32_prord256:
13328 case X86::BI__builtin_ia32_prord512:
13329 case X86::BI__builtin_ia32_prorq128:
13330 case X86::BI__builtin_ia32_prorq256:
13331 case X86::BI__builtin_ia32_prorq512:
13332 case X86::BI__builtin_ia32_prorvd128:
13333 case X86::BI__builtin_ia32_prorvd256:
13334 case X86::BI__builtin_ia32_prorvd512:
13335 case X86::BI__builtin_ia32_prorvq128:
13336 case X86::BI__builtin_ia32_prorvq256:
13337 case X86::BI__builtin_ia32_prorvq512:
13338 return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], true);
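  // AVX512 masked selects: blend the two vector operands lane-wise under the
  // vXi1 mask built from the first operand.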
13339 case X86::BI__builtin_ia32_selectb_128:
13340 case X86::BI__builtin_ia32_selectb_256:
13341 case X86::BI__builtin_ia32_selectb_512:
13342 case X86::BI__builtin_ia32_selectw_128:
13343 case X86::BI__builtin_ia32_selectw_256:
13344 case X86::BI__builtin_ia32_selectw_512:
13345 case X86::BI__builtin_ia32_selectd_128:
13346 case X86::BI__builtin_ia32_selectd_256:
13347 case X86::BI__builtin_ia32_selectd_512:
13348 case X86::BI__builtin_ia32_selectq_128:
13349 case X86::BI__builtin_ia32_selectq_256:
13350 case X86::BI__builtin_ia32_selectq_512:
13351 case X86::BI__builtin_ia32_selectps_128:
13352 case X86::BI__builtin_ia32_selectps_256:
13353 case X86::BI__builtin_ia32_selectps_512:
13354 case X86::BI__builtin_ia32_selectpd_128:
13355 case X86::BI__builtin_ia32_selectpd_256:
13356 case X86::BI__builtin_ia32_selectpd_512:
13357 return EmitX86Select(*this, Ops[0], Ops[1], Ops[2]);
13358 case X86::BI__builtin_ia32_selectss_128:
13359 case X86::BI__builtin_ia32_selectsd_128: {
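    // Only the low element participates: select between the two low elements
    // under the low mask bit and reinsert the result into Ops[1].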
13360 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
13361 Value *B = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
13362 A = EmitX86ScalarSelect(*this, Ops[0], A, B);
13363 return Builder.CreateInsertElement(Ops[1], A, (uint64_t)0);
13364 }
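  // AVX512 integer compares: the low three bits of the immediate select the
  // predicate; the signed and unsigned variants share the lowering.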
13365 case X86::BI__builtin_ia32_cmpb128_mask:
13366 case X86::BI__builtin_ia32_cmpb256_mask:
13367 case X86::BI__builtin_ia32_cmpb512_mask:
13368 case X86::BI__builtin_ia32_cmpw128_mask:
13369 case X86::BI__builtin_ia32_cmpw256_mask:
13370 case X86::BI__builtin_ia32_cmpw512_mask:
13371 case X86::BI__builtin_ia32_cmpd128_mask:
13372 case X86::BI__builtin_ia32_cmpd256_mask:
13373 case X86::BI__builtin_ia32_cmpd512_mask:
13374 case X86::BI__builtin_ia32_cmpq128_mask:
13375 case X86::BI__builtin_ia32_cmpq256_mask:
13376 case X86::BI__builtin_ia32_cmpq512_mask: {
13377 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
13378 return EmitX86MaskedCompare(*this, CC, true, Ops);
13379 }
13380 case X86::BI__builtin_ia32_ucmpb128_mask:
13381 case X86::BI__builtin_ia32_ucmpb256_mask:
13382 case X86::BI__builtin_ia32_ucmpb512_mask:
13383 case X86::BI__builtin_ia32_ucmpw128_mask:
13384 case X86::BI__builtin_ia32_ucmpw256_mask:
13385 case X86::BI__builtin_ia32_ucmpw512_mask:
13386 case X86::BI__builtin_ia32_ucmpd128_mask:
13387 case X86::BI__builtin_ia32_ucmpd256_mask:
13388 case X86::BI__builtin_ia32_ucmpd512_mask:
13389 case X86::BI__builtin_ia32_ucmpq128_mask:
13390 case X86::BI__builtin_ia32_ucmpq256_mask:
13391 case X86::BI__builtin_ia32_ucmpq512_mask: {
13392 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
13393 return EmitX86MaskedCompare(*this, CC, false, Ops);
13394 }
13395 case X86::BI__builtin_ia32_vpcomb:
13396 case X86::BI__builtin_ia32_vpcomw:
13397 case X86::BI__builtin_ia32_vpcomd:
13398 case X86::BI__builtin_ia32_vpcomq:
13399 return EmitX86vpcom(*this, Ops, true);
13400 case X86::BI__builtin_ia32_vpcomub:
13401 case X86::BI__builtin_ia32_vpcomuw:
13402 case X86::BI__builtin_ia32_vpcomud:
13403 case X86::BI__builtin_ia32_vpcomuq:
13404 return EmitX86vpcom(*this, Ops, false);
13405
13406 case X86::BI__builtin_ia32_kortestcqi:
13407 case X86::BI__builtin_ia32_kortestchi:
13408 case X86::BI__builtin_ia32_kortestcsi:
13409 case X86::BI__builtin_ia32_kortestcdi: {
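    // kortestc: OR the two masks and return 1 iff the result is all ones.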
13410 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
13411 Value *C = llvm::Constant::getAllOnesValue(Ops[0]->getType());
13412 Value *Cmp = Builder.CreateICmpEQ(Or, C);
13413 return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
13414 }
13415 case X86::BI__builtin_ia32_kortestzqi:
13416 case X86::BI__builtin_ia32_kortestzhi:
13417 case X86::BI__builtin_ia32_kortestzsi:
13418 case X86::BI__builtin_ia32_kortestzdi: {
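    // kortestz: OR the two masks and return 1 iff the result is zero.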
13419 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
13420 Value *C = llvm::Constant::getNullValue(Ops[0]->getType());
13421 Value *Cmp = Builder.CreateICmpEQ(Or, C);
13422 return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
13423 }
13424
13425 case X86::BI__builtin_ia32_ktestcqi:
13426 case X86::BI__builtin_ia32_ktestzqi:
13427 case X86::BI__builtin_ia32_ktestchi:
13428 case X86::BI__builtin_ia32_ktestzhi:
13429 case X86::BI__builtin_ia32_ktestcsi:
13430 case X86::BI__builtin_ia32_ktestzsi:
13431 case X86::BI__builtin_ia32_ktestcdi:
13432 case X86::BI__builtin_ia32_ktestzdi: {
13433 Intrinsic::ID IID;
13434 switch (BuiltinID) {
13435 default: llvm_unreachable("Unsupported intrinsic!");
13436 case X86::BI__builtin_ia32_ktestcqi:
13437 IID = Intrinsic::x86_avx512_ktestc_b;
13438 break;
13439 case X86::BI__builtin_ia32_ktestzqi:
13440 IID = Intrinsic::x86_avx512_ktestz_b;
13441 break;
13442 case X86::BI__builtin_ia32_ktestchi:
13443 IID = Intrinsic::x86_avx512_ktestc_w;
13444 break;
13445 case X86::BI__builtin_ia32_ktestzhi:
13446 IID = Intrinsic::x86_avx512_ktestz_w;
13447 break;
13448 case X86::BI__builtin_ia32_ktestcsi:
13449 IID = Intrinsic::x86_avx512_ktestc_d;
13450 break;
13451 case X86::BI__builtin_ia32_ktestzsi:
13452 IID = Intrinsic::x86_avx512_ktestz_d;
13453 break;
13454 case X86::BI__builtin_ia32_ktestcdi:
13455 IID = Intrinsic::x86_avx512_ktestc_q;
13456 break;
13457 case X86::BI__builtin_ia32_ktestzdi:
13458 IID = Intrinsic::x86_avx512_ktestz_q;
13459 break;
13460 }
13461
13462 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13463 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
13464 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
13465 Function *Intr = CGM.getIntrinsic(IID);
13466 return Builder.CreateCall(Intr, {LHS, RHS});
13467 }
13468
13469 case X86::BI__builtin_ia32_kaddqi:
13470 case X86::BI__builtin_ia32_kaddhi:
13471 case X86::BI__builtin_ia32_kaddsi:
13472 case X86::BI__builtin_ia32_kadddi: {
13473 Intrinsic::ID IID;
13474 switch (BuiltinID) {
13475 default: llvm_unreachable("Unsupported intrinsic!");
13476 case X86::BI__builtin_ia32_kaddqi:
13477 IID = Intrinsic::x86_avx512_kadd_b;
13478 break;
13479 case X86::BI__builtin_ia32_kaddhi:
13480 IID = Intrinsic::x86_avx512_kadd_w;
13481 break;
13482 case X86::BI__builtin_ia32_kaddsi:
13483 IID = Intrinsic::x86_avx512_kadd_d;
13484 break;
13485 case X86::BI__builtin_ia32_kadddi:
13486 IID = Intrinsic::x86_avx512_kadd_q;
13487 break;
13488 }
13489
13490 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13491 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
13492 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
13493 Function *Intr = CGM.getIntrinsic(IID);
13494 Value *Res = Builder.CreateCall(Intr, {LHS, RHS});
13495 return Builder.CreateBitCast(Res, Ops[0]->getType());
13496 }
13497 case X86::BI__builtin_ia32_kandqi:
13498 case X86::BI__builtin_ia32_kandhi:
13499 case X86::BI__builtin_ia32_kandsi:
13500 case X86::BI__builtin_ia32_kanddi:
13501 return EmitX86MaskLogic(*this, Instruction::And, Ops);
13502 case X86::BI__builtin_ia32_kandnqi:
13503 case X86::BI__builtin_ia32_kandnhi:
13504 case X86::BI__builtin_ia32_kandnsi:
13505 case X86::BI__builtin_ia32_kandndi:
13506 return EmitX86MaskLogic(*this, Instruction::And, Ops, true);
13507 case X86::BI__builtin_ia32_korqi:
13508 case X86::BI__builtin_ia32_korhi:
13509 case X86::BI__builtin_ia32_korsi:
13510 case X86::BI__builtin_ia32_kordi:
13511 return EmitX86MaskLogic(*this, Instruction::Or, Ops);
13512 case X86::BI__builtin_ia32_kxnorqi:
13513 case X86::BI__builtin_ia32_kxnorhi:
13514 case X86::BI__builtin_ia32_kxnorsi:
13515 case X86::BI__builtin_ia32_kxnordi:
13516 return EmitX86MaskLogic(*this, Instruction::Xor, Ops, true);
13517 case X86::BI__builtin_ia32_kxorqi:
13518 case X86::BI__builtin_ia32_kxorhi:
13519 case X86::BI__builtin_ia32_kxorsi:
13520 case X86::BI__builtin_ia32_kxordi:
13521 return EmitX86MaskLogic(*this, Instruction::Xor, Ops);
13522 case X86::BI__builtin_ia32_knotqi:
13523 case X86::BI__builtin_ia32_knothi:
13524 case X86::BI__builtin_ia32_knotsi:
13525 case X86::BI__builtin_ia32_knotdi: {
13526 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13527 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
13528 return Builder.CreateBitCast(Builder.CreateNot(Res),
13529 Ops[0]->getType());
13530 }
13531 case X86::BI__builtin_ia32_kmovb:
13532 case X86::BI__builtin_ia32_kmovw:
13533 case X86::BI__builtin_ia32_kmovd:
13534 case X86::BI__builtin_ia32_kmovq: {
13535 // Bitcast to vXi1 type and then back to integer. This gets the mask
13536 // register type into the IR, but might be optimized out depending on
13537 // what's around it.
13538 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13539 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
13540 return Builder.CreateBitCast(Res, Ops[0]->getType());
13541 }
13542
13543 case X86::BI__builtin_ia32_kunpckdi:
13544 case X86::BI__builtin_ia32_kunpcksi:
13545 case X86::BI__builtin_ia32_kunpckhi: {
13546 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13547 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
13548 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
13549 int Indices[64];
13550 for (unsigned i = 0; i != NumElts; ++i)
13551 Indices[i] = i;
13552
13553 // First extract half of each vector. This gives better codegen than
13554 // doing it in a single shuffle.
13555 LHS = Builder.CreateShuffleVector(LHS, LHS,
13556 makeArrayRef(Indices, NumElts / 2));
13557 RHS = Builder.CreateShuffleVector(RHS, RHS,
13558 makeArrayRef(Indices, NumElts / 2));
13559 // Concat the vectors.
13560 // NOTE: Operands are swapped to match the intrinsic definition.
13561 Value *Res = Builder.CreateShuffleVector(RHS, LHS,
13562 makeArrayRef(Indices, NumElts));
13563 return Builder.CreateBitCast(Res, Ops[0]->getType());
13564 }
13565
13566 case X86::BI__builtin_ia32_vplzcntd_128:
13567 case X86::BI__builtin_ia32_vplzcntd_256:
13568 case X86::BI__builtin_ia32_vplzcntd_512:
13569 case X86::BI__builtin_ia32_vplzcntq_128:
13570 case X86::BI__builtin_ia32_vplzcntq_256:
13571 case X86::BI__builtin_ia32_vplzcntq_512: {
13572 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
13573    return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
13574 }
13575 case X86::BI__builtin_ia32_sqrtss:
13576 case X86::BI__builtin_ia32_sqrtsd: {
13577 Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
13578 Function *F;
13579 if (Builder.getIsFPConstrained()) {
13580 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
13581 A->getType());
13582 A = Builder.CreateConstrainedFPCall(F, {A});
13583 } else {
13584 F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
13585 A = Builder.CreateCall(F, {A});
13586 }
13587 return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
13588 }
13589 case X86::BI__builtin_ia32_sqrtsd_round_mask:
13590 case X86::BI__builtin_ia32_sqrtss_round_mask: {
13591 unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
13592    // Only lower to IR sqrt if the rounding mode is 4 (AKA CUR_DIRECTION);
13593    // otherwise keep the target intrinsic.
13594 if (CC != 4) {
13595 Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtsd_round_mask ?
13596 Intrinsic::x86_avx512_mask_sqrt_sd :
13597 Intrinsic::x86_avx512_mask_sqrt_ss;
13598 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
13599 }
13600 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
13601 Function *F;
13602 if (Builder.getIsFPConstrained()) {
13603 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
13604 A->getType());
13605 A = Builder.CreateConstrainedFPCall(F, A);
13606 } else {
13607 F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
13608 A = Builder.CreateCall(F, A);
13609 }
13610 Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
13611 A = EmitX86ScalarSelect(*this, Ops[3], A, Src);
13612 return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
13613 }
13614 case X86::BI__builtin_ia32_sqrtpd256:
13615 case X86::BI__builtin_ia32_sqrtpd:
13616 case X86::BI__builtin_ia32_sqrtps256:
13617 case X86::BI__builtin_ia32_sqrtps:
13618 case X86::BI__builtin_ia32_sqrtps512:
13619 case X86::BI__builtin_ia32_sqrtpd512: {
13620 if (Ops.size() == 2) {
13621 unsigned CC = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
13622      // Only lower to IR sqrt if the rounding mode is 4 (AKA CUR_DIRECTION);
13623      // otherwise keep the target intrinsic.
13624 if (CC != 4) {
13625 Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtps512 ?
13626 Intrinsic::x86_avx512_sqrt_ps_512 :
13627 Intrinsic::x86_avx512_sqrt_pd_512;
13628 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
13629 }
13630 }
13631 if (Builder.getIsFPConstrained()) {
13632 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
13633 Ops[0]->getType());
13634 return Builder.CreateConstrainedFPCall(F, Ops[0]);
13635 } else {
13636 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
13637 return Builder.CreateCall(F, Ops[0]);
13638 }
13639 }
13640 case X86::BI__builtin_ia32_pabsb128:
13641 case X86::BI__builtin_ia32_pabsw128:
13642 case X86::BI__builtin_ia32_pabsd128:
13643 case X86::BI__builtin_ia32_pabsb256:
13644 case X86::BI__builtin_ia32_pabsw256:
13645 case X86::BI__builtin_ia32_pabsd256:
13646 case X86::BI__builtin_ia32_pabsq128:
13647 case X86::BI__builtin_ia32_pabsq256:
13648 case X86::BI__builtin_ia32_pabsb512:
13649 case X86::BI__builtin_ia32_pabsw512:
13650 case X86::BI__builtin_ia32_pabsd512:
13651 case X86::BI__builtin_ia32_pabsq512: {
13652 Function *F = CGM.getIntrinsic(Intrinsic::abs, Ops[0]->getType());
13653 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
13654 }
13655 case X86::BI__builtin_ia32_pmaxsb128:
13656 case X86::BI__builtin_ia32_pmaxsw128:
13657 case X86::BI__builtin_ia32_pmaxsd128:
13658 case X86::BI__builtin_ia32_pmaxsq128:
13659 case X86::BI__builtin_ia32_pmaxsb256:
13660 case X86::BI__builtin_ia32_pmaxsw256:
13661 case X86::BI__builtin_ia32_pmaxsd256:
13662 case X86::BI__builtin_ia32_pmaxsq256:
13663 case X86::BI__builtin_ia32_pmaxsb512:
13664 case X86::BI__builtin_ia32_pmaxsw512:
13665 case X86::BI__builtin_ia32_pmaxsd512:
13666 case X86::BI__builtin_ia32_pmaxsq512:
13667 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smax);
13668 case X86::BI__builtin_ia32_pmaxub128:
13669 case X86::BI__builtin_ia32_pmaxuw128:
13670 case X86::BI__builtin_ia32_pmaxud128:
13671 case X86::BI__builtin_ia32_pmaxuq128:
13672 case X86::BI__builtin_ia32_pmaxub256:
13673 case X86::BI__builtin_ia32_pmaxuw256:
13674 case X86::BI__builtin_ia32_pmaxud256:
13675 case X86::BI__builtin_ia32_pmaxuq256:
13676 case X86::BI__builtin_ia32_pmaxub512:
13677 case X86::BI__builtin_ia32_pmaxuw512:
13678 case X86::BI__builtin_ia32_pmaxud512:
13679 case X86::BI__builtin_ia32_pmaxuq512:
13680 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umax);
13681 case X86::BI__builtin_ia32_pminsb128:
13682 case X86::BI__builtin_ia32_pminsw128:
13683 case X86::BI__builtin_ia32_pminsd128:
13684 case X86::BI__builtin_ia32_pminsq128:
13685 case X86::BI__builtin_ia32_pminsb256:
13686 case X86::BI__builtin_ia32_pminsw256:
13687 case X86::BI__builtin_ia32_pminsd256:
13688 case X86::BI__builtin_ia32_pminsq256:
13689 case X86::BI__builtin_ia32_pminsb512:
13690 case X86::BI__builtin_ia32_pminsw512:
13691 case X86::BI__builtin_ia32_pminsd512:
13692 case X86::BI__builtin_ia32_pminsq512:
13693 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smin);
13694 case X86::BI__builtin_ia32_pminub128:
13695 case X86::BI__builtin_ia32_pminuw128:
13696 case X86::BI__builtin_ia32_pminud128:
13697 case X86::BI__builtin_ia32_pminuq128:
13698 case X86::BI__builtin_ia32_pminub256:
13699 case X86::BI__builtin_ia32_pminuw256:
13700 case X86::BI__builtin_ia32_pminud256:
13701 case X86::BI__builtin_ia32_pminuq256:
13702 case X86::BI__builtin_ia32_pminub512:
13703 case X86::BI__builtin_ia32_pminuw512:
13704 case X86::BI__builtin_ia32_pminud512:
13705 case X86::BI__builtin_ia32_pminuq512:
13706 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umin);
13707
13708 case X86::BI__builtin_ia32_pmuludq128:
13709 case X86::BI__builtin_ia32_pmuludq256:
13710 case X86::BI__builtin_ia32_pmuludq512:
13711 return EmitX86Muldq(*this, /*IsSigned*/false, Ops);
13712
13713 case X86::BI__builtin_ia32_pmuldq128:
13714 case X86::BI__builtin_ia32_pmuldq256:
13715 case X86::BI__builtin_ia32_pmuldq512:
13716 return EmitX86Muldq(*this, /*IsSigned*/true, Ops);
13717
13718 case X86::BI__builtin_ia32_pternlogd512_mask:
13719 case X86::BI__builtin_ia32_pternlogq512_mask:
13720 case X86::BI__builtin_ia32_pternlogd128_mask:
13721 case X86::BI__builtin_ia32_pternlogd256_mask:
13722 case X86::BI__builtin_ia32_pternlogq128_mask:
13723 case X86::BI__builtin_ia32_pternlogq256_mask:
13724 return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops);
13725
13726 case X86::BI__builtin_ia32_pternlogd512_maskz:
13727 case X86::BI__builtin_ia32_pternlogq512_maskz:
13728 case X86::BI__builtin_ia32_pternlogd128_maskz:
13729 case X86::BI__builtin_ia32_pternlogd256_maskz:
13730 case X86::BI__builtin_ia32_pternlogq128_maskz:
13731 case X86::BI__builtin_ia32_pternlogq256_maskz:
13732 return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops);
13733
13734 case X86::BI__builtin_ia32_vpshldd128:
13735 case X86::BI__builtin_ia32_vpshldd256:
13736 case X86::BI__builtin_ia32_vpshldd512:
13737 case X86::BI__builtin_ia32_vpshldq128:
13738 case X86::BI__builtin_ia32_vpshldq256:
13739 case X86::BI__builtin_ia32_vpshldq512:
13740 case X86::BI__builtin_ia32_vpshldw128:
13741 case X86::BI__builtin_ia32_vpshldw256:
13742 case X86::BI__builtin_ia32_vpshldw512:
13743 return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
13744
13745 case X86::BI__builtin_ia32_vpshrdd128:
13746 case X86::BI__builtin_ia32_vpshrdd256:
13747 case X86::BI__builtin_ia32_vpshrdd512:
13748 case X86::BI__builtin_ia32_vpshrdq128:
13749 case X86::BI__builtin_ia32_vpshrdq256:
13750 case X86::BI__builtin_ia32_vpshrdq512:
13751 case X86::BI__builtin_ia32_vpshrdw128:
13752 case X86::BI__builtin_ia32_vpshrdw256:
13753 case X86::BI__builtin_ia32_vpshrdw512:
13754 // Ops 0 and 1 are swapped.
13755 return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
13756
13757 case X86::BI__builtin_ia32_vpshldvd128:
13758 case X86::BI__builtin_ia32_vpshldvd256:
13759 case X86::BI__builtin_ia32_vpshldvd512:
13760 case X86::BI__builtin_ia32_vpshldvq128:
13761 case X86::BI__builtin_ia32_vpshldvq256:
13762 case X86::BI__builtin_ia32_vpshldvq512:
13763 case X86::BI__builtin_ia32_vpshldvw128:
13764 case X86::BI__builtin_ia32_vpshldvw256:
13765 case X86::BI__builtin_ia32_vpshldvw512:
13766 return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
13767
13768 case X86::BI__builtin_ia32_vpshrdvd128:
13769 case X86::BI__builtin_ia32_vpshrdvd256:
13770 case X86::BI__builtin_ia32_vpshrdvd512:
13771 case X86::BI__builtin_ia32_vpshrdvq128:
13772 case X86::BI__builtin_ia32_vpshrdvq256:
13773 case X86::BI__builtin_ia32_vpshrdvq512:
13774 case X86::BI__builtin_ia32_vpshrdvw128:
13775 case X86::BI__builtin_ia32_vpshrdvw256:
13776 case X86::BI__builtin_ia32_vpshrdvw512:
13777 // Ops 0 and 1 are swapped.
13778 return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
13779
13780 // Reductions
13781 case X86::BI__builtin_ia32_reduce_add_d512:
13782 case X86::BI__builtin_ia32_reduce_add_q512: {
13783 Function *F =
13784 CGM.getIntrinsic(Intrinsic::vector_reduce_add, Ops[0]->getType());
13785 return Builder.CreateCall(F, {Ops[0]});
13786 }
13787 case X86::BI__builtin_ia32_reduce_and_d512:
13788 case X86::BI__builtin_ia32_reduce_and_q512: {
13789 Function *F =
13790 CGM.getIntrinsic(Intrinsic::vector_reduce_and, Ops[0]->getType());
13791 return Builder.CreateCall(F, {Ops[0]});
13792 }
13793 case X86::BI__builtin_ia32_reduce_fadd_pd512:
13794 case X86::BI__builtin_ia32_reduce_fadd_ps512: {
13795 Function *F =
13796 CGM.getIntrinsic(Intrinsic::vector_reduce_fadd, Ops[1]->getType());
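    // vector_reduce_fadd is an ordered reduction unless the call carries the
    // reassoc flag; allow reassociation so the backend can use the fast
    // unordered expansion.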
13797 Builder.getFastMathFlags().setAllowReassoc(true);
13798 return Builder.CreateCall(F, {Ops[0], Ops[1]});
13799 }
13800 case X86::BI__builtin_ia32_reduce_fmul_pd512:
13801 case X86::BI__builtin_ia32_reduce_fmul_ps512: {
13802 Function *F =
13803 CGM.getIntrinsic(Intrinsic::vector_reduce_fmul, Ops[1]->getType());
13804 Builder.getFastMathFlags().setAllowReassoc(true);
13805 return Builder.CreateCall(F, {Ops[0], Ops[1]});
13806 }
13807 case X86::BI__builtin_ia32_reduce_mul_d512:
13808 case X86::BI__builtin_ia32_reduce_mul_q512: {
13809 Function *F =
13810 CGM.getIntrinsic(Intrinsic::vector_reduce_mul, Ops[0]->getType());
13811 return Builder.CreateCall(F, {Ops[0]});
13812 }
13813 case X86::BI__builtin_ia32_reduce_or_d512:
13814 case X86::BI__builtin_ia32_reduce_or_q512: {
13815 Function *F =
13816 CGM.getIntrinsic(Intrinsic::vector_reduce_or, Ops[0]->getType());
13817 return Builder.CreateCall(F, {Ops[0]});
13818 }
13819 case X86::BI__builtin_ia32_reduce_smax_d512:
13820 case X86::BI__builtin_ia32_reduce_smax_q512: {
13821 Function *F =
13822 CGM.getIntrinsic(Intrinsic::vector_reduce_smax, Ops[0]->getType());
13823 return Builder.CreateCall(F, {Ops[0]});
13824 }
13825 case X86::BI__builtin_ia32_reduce_smin_d512:
13826 case X86::BI__builtin_ia32_reduce_smin_q512: {
13827 Function *F =
13828 CGM.getIntrinsic(Intrinsic::vector_reduce_smin, Ops[0]->getType());
13829 return Builder.CreateCall(F, {Ops[0]});
13830 }
13831 case X86::BI__builtin_ia32_reduce_umax_d512:
13832 case X86::BI__builtin_ia32_reduce_umax_q512: {
13833 Function *F =
13834 CGM.getIntrinsic(Intrinsic::vector_reduce_umax, Ops[0]->getType());
13835 return Builder.CreateCall(F, {Ops[0]});
13836 }
13837 case X86::BI__builtin_ia32_reduce_umin_d512:
13838 case X86::BI__builtin_ia32_reduce_umin_q512: {
13839 Function *F =
13840 CGM.getIntrinsic(Intrinsic::vector_reduce_umin, Ops[0]->getType());
13841 return Builder.CreateCall(F, {Ops[0]});
13842 }
13843
13844 // 3DNow!
13845 case X86::BI__builtin_ia32_pswapdsf:
13846 case X86::BI__builtin_ia32_pswapdsi: {
13847 llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
13848 Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
13849 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd);
13850 return Builder.CreateCall(F, Ops, "pswapd");
13851 }
13852 case X86::BI__builtin_ia32_rdrand16_step:
13853 case X86::BI__builtin_ia32_rdrand32_step:
13854 case X86::BI__builtin_ia32_rdrand64_step:
13855 case X86::BI__builtin_ia32_rdseed16_step:
13856 case X86::BI__builtin_ia32_rdseed32_step:
13857 case X86::BI__builtin_ia32_rdseed64_step: {
13858 Intrinsic::ID ID;
13859 switch (BuiltinID) {
13860 default: llvm_unreachable("Unsupported intrinsic!");
13861 case X86::BI__builtin_ia32_rdrand16_step:
13862 ID = Intrinsic::x86_rdrand_16;
13863 break;
13864 case X86::BI__builtin_ia32_rdrand32_step:
13865 ID = Intrinsic::x86_rdrand_32;
13866 break;
13867 case X86::BI__builtin_ia32_rdrand64_step:
13868 ID = Intrinsic::x86_rdrand_64;
13869 break;
13870 case X86::BI__builtin_ia32_rdseed16_step:
13871 ID = Intrinsic::x86_rdseed_16;
13872 break;
13873 case X86::BI__builtin_ia32_rdseed32_step:
13874 ID = Intrinsic::x86_rdseed_32;
13875 break;
13876 case X86::BI__builtin_ia32_rdseed64_step:
13877 ID = Intrinsic::x86_rdseed_64;
13878 break;
13879 }
13880
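    // Each step intrinsic returns {random value, i32 success flag}; store the
    // value through the pointer operand and return the flag.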
13881 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
13882 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 0),
13883 Ops[0]);
13884 return Builder.CreateExtractValue(Call, 1);
13885 }
13886 case X86::BI__builtin_ia32_addcarryx_u32:
13887 case X86::BI__builtin_ia32_addcarryx_u64:
13888 case X86::BI__builtin_ia32_subborrow_u32:
13889 case X86::BI__builtin_ia32_subborrow_u64: {
13890 Intrinsic::ID IID;
13891 switch (BuiltinID) {
13892 default: llvm_unreachable("Unsupported intrinsic!");
13893 case X86::BI__builtin_ia32_addcarryx_u32:
13894 IID = Intrinsic::x86_addcarry_32;
13895 break;
13896 case X86::BI__builtin_ia32_addcarryx_u64:
13897 IID = Intrinsic::x86_addcarry_64;
13898 break;
13899 case X86::BI__builtin_ia32_subborrow_u32:
13900 IID = Intrinsic::x86_subborrow_32;
13901 break;
13902 case X86::BI__builtin_ia32_subborrow_u64:
13903 IID = Intrinsic::x86_subborrow_64;
13904 break;
13905 }
13906
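    // These intrinsics return {i8 carry-out, result}; store the result
    // through Ops[3] and return the carry-out.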
13907 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID),
13908 { Ops[0], Ops[1], Ops[2] });
13909 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
13910 Ops[3]);
13911 return Builder.CreateExtractValue(Call, 0);
13912 }
13913
13914 case X86::BI__builtin_ia32_fpclassps128_mask:
13915 case X86::BI__builtin_ia32_fpclassps256_mask:
13916 case X86::BI__builtin_ia32_fpclassps512_mask:
13917 case X86::BI__builtin_ia32_fpclasspd128_mask:
13918 case X86::BI__builtin_ia32_fpclasspd256_mask:
13919 case X86::BI__builtin_ia32_fpclasspd512_mask: {
13920 unsigned NumElts =
13921 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13922 Value *MaskIn = Ops[2];
13923 Ops.erase(&Ops[2]);
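    // Drop the mask operand from the intrinsic call; it is applied to the
    // compare result afterwards.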
13924
13925 Intrinsic::ID ID;
13926 switch (BuiltinID) {
13927 default: llvm_unreachable("Unsupported intrinsic!");
13928 case X86::BI__builtin_ia32_fpclassps128_mask:
13929 ID = Intrinsic::x86_avx512_fpclass_ps_128;
13930 break;
13931 case X86::BI__builtin_ia32_fpclassps256_mask:
13932 ID = Intrinsic::x86_avx512_fpclass_ps_256;
13933 break;
13934 case X86::BI__builtin_ia32_fpclassps512_mask:
13935 ID = Intrinsic::x86_avx512_fpclass_ps_512;
13936 break;
13937 case X86::BI__builtin_ia32_fpclasspd128_mask:
13938 ID = Intrinsic::x86_avx512_fpclass_pd_128;
13939 break;
13940 case X86::BI__builtin_ia32_fpclasspd256_mask:
13941 ID = Intrinsic::x86_avx512_fpclass_pd_256;
13942 break;
13943 case X86::BI__builtin_ia32_fpclasspd512_mask:
13944 ID = Intrinsic::x86_avx512_fpclass_pd_512;
13945 break;
13946 }
13947
13948 Value *Fpclass = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
13949 return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn);
13950 }
13951
13952 case X86::BI__builtin_ia32_vp2intersect_q_512:
13953 case X86::BI__builtin_ia32_vp2intersect_q_256:
13954 case X86::BI__builtin_ia32_vp2intersect_q_128:
13955 case X86::BI__builtin_ia32_vp2intersect_d_512:
13956 case X86::BI__builtin_ia32_vp2intersect_d_256:
13957 case X86::BI__builtin_ia32_vp2intersect_d_128: {
13958 unsigned NumElts =
13959 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13960 Intrinsic::ID ID;
13961
13962 switch (BuiltinID) {
13963 default: llvm_unreachable("Unsupported intrinsic!");
13964 case X86::BI__builtin_ia32_vp2intersect_q_512:
13965 ID = Intrinsic::x86_avx512_vp2intersect_q_512;
13966 break;
13967 case X86::BI__builtin_ia32_vp2intersect_q_256:
13968 ID = Intrinsic::x86_avx512_vp2intersect_q_256;
13969 break;
13970 case X86::BI__builtin_ia32_vp2intersect_q_128:
13971 ID = Intrinsic::x86_avx512_vp2intersect_q_128;
13972 break;
13973 case X86::BI__builtin_ia32_vp2intersect_d_512:
13974 ID = Intrinsic::x86_avx512_vp2intersect_d_512;
13975 break;
13976 case X86::BI__builtin_ia32_vp2intersect_d_256:
13977 ID = Intrinsic::x86_avx512_vp2intersect_d_256;
13978 break;
13979 case X86::BI__builtin_ia32_vp2intersect_d_128:
13980 ID = Intrinsic::x86_avx512_vp2intersect_d_128;
13981 break;
13982 }
13983
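    // The intrinsic returns two vXi1 masks; widen each one to the builtin's
    // mask type and store them through the last two pointer operands.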
13984 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID), {Ops[0], Ops[1]});
13985 Value *Result = Builder.CreateExtractValue(Call, 0);
13986 Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
13987 Builder.CreateDefaultAlignedStore(Result, Ops[2]);
13988
13989 Result = Builder.CreateExtractValue(Call, 1);
13990 Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
13991 return Builder.CreateDefaultAlignedStore(Result, Ops[3]);
13992 }
13993
13994 case X86::BI__builtin_ia32_vpmultishiftqb128:
13995 case X86::BI__builtin_ia32_vpmultishiftqb256:
13996 case X86::BI__builtin_ia32_vpmultishiftqb512: {
13997 Intrinsic::ID ID;
13998 switch (BuiltinID) {
13999 default: llvm_unreachable("Unsupported intrinsic!");
14000 case X86::BI__builtin_ia32_vpmultishiftqb128:
14001 ID = Intrinsic::x86_avx512_pmultishift_qb_128;
14002 break;
14003 case X86::BI__builtin_ia32_vpmultishiftqb256:
14004 ID = Intrinsic::x86_avx512_pmultishift_qb_256;
14005 break;
14006 case X86::BI__builtin_ia32_vpmultishiftqb512:
14007 ID = Intrinsic::x86_avx512_pmultishift_qb_512;
14008 break;
14009 }
14010
14011 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
14012 }
14013
14014 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
14015 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
14016 case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
14017 unsigned NumElts =
14018 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14019 Value *MaskIn = Ops[2];
14020 Ops.erase(&Ops[2]);
14021
14022 Intrinsic::ID ID;
14023 switch (BuiltinID) {
14024 default: llvm_unreachable("Unsupported intrinsic!");
14025 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
14026 ID = Intrinsic::x86_avx512_vpshufbitqmb_128;
14027 break;
14028 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
14029 ID = Intrinsic::x86_avx512_vpshufbitqmb_256;
14030 break;
14031 case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
14032 ID = Intrinsic::x86_avx512_vpshufbitqmb_512;
14033 break;
14034 }
14035
14036 Value *Shufbit = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
14037 return EmitX86MaskedCompareResult(*this, Shufbit, NumElts, MaskIn);
14038 }
14039
14040  // Packed comparison intrinsics
14041 case X86::BI__builtin_ia32_cmpeqps:
14042 case X86::BI__builtin_ia32_cmpeqpd:
14043 return getVectorFCmpIR(CmpInst::FCMP_OEQ, /*IsSignaling*/false);
14044 case X86::BI__builtin_ia32_cmpltps:
14045 case X86::BI__builtin_ia32_cmpltpd:
14046 return getVectorFCmpIR(CmpInst::FCMP_OLT, /*IsSignaling*/true);
14047 case X86::BI__builtin_ia32_cmpleps:
14048 case X86::BI__builtin_ia32_cmplepd:
14049 return getVectorFCmpIR(CmpInst::FCMP_OLE, /*IsSignaling*/true);
14050 case X86::BI__builtin_ia32_cmpunordps:
14051 case X86::BI__builtin_ia32_cmpunordpd:
14052 return getVectorFCmpIR(CmpInst::FCMP_UNO, /*IsSignaling*/false);
14053 case X86::BI__builtin_ia32_cmpneqps:
14054 case X86::BI__builtin_ia32_cmpneqpd:
14055 return getVectorFCmpIR(CmpInst::FCMP_UNE, /*IsSignaling*/false);
14056 case X86::BI__builtin_ia32_cmpnltps:
14057 case X86::BI__builtin_ia32_cmpnltpd:
14058 return getVectorFCmpIR(CmpInst::FCMP_UGE, /*IsSignaling*/true);
14059 case X86::BI__builtin_ia32_cmpnleps:
14060 case X86::BI__builtin_ia32_cmpnlepd:
14061 return getVectorFCmpIR(CmpInst::FCMP_UGT, /*IsSignaling*/true);
14062 case X86::BI__builtin_ia32_cmpordps:
14063 case X86::BI__builtin_ia32_cmpordpd:
14064 return getVectorFCmpIR(CmpInst::FCMP_ORD, /*IsSignaling*/false);
14065 case X86::BI__builtin_ia32_cmpps128_mask:
14066 case X86::BI__builtin_ia32_cmpps256_mask:
14067 case X86::BI__builtin_ia32_cmpps512_mask:
14068 case X86::BI__builtin_ia32_cmppd128_mask:
14069 case X86::BI__builtin_ia32_cmppd256_mask:
14070 case X86::BI__builtin_ia32_cmppd512_mask:
14071 IsMaskFCmp = true;
14072 LLVM_FALLTHROUGH;
14073 case X86::BI__builtin_ia32_cmpps:
14074 case X86::BI__builtin_ia32_cmpps256:
14075 case X86::BI__builtin_ia32_cmppd:
14076 case X86::BI__builtin_ia32_cmppd256: {
14077    // Lower vector comparisons to fcmp instructions, ignoring both the
14078    // signaling behaviour and the rounding mode that were requested.
14079    // This is only possible if the fp-model is not strict and
14080    // FENV_ACCESS is off.
14081
14082    // The third argument is the comparison condition, an integer in the
14083    // range [0, 31].
14084 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x1f;
14085
14086    // Lower to an IR fcmp instruction, ignoring the requested signaling
14087    // behaviour; e.g. both _CMP_GT_OS and _CMP_GT_OQ are translated to
14088    // FCMP_OGT.
14089 FCmpInst::Predicate Pred;
14090 bool IsSignaling;
14091    // Predicates for 16-31 repeat the 0-15 predicates. Only the signaling
14092    // behavior is inverted. We'll handle that after the switch.
14093 switch (CC & 0xf) {
14094 case 0x00: Pred = FCmpInst::FCMP_OEQ; IsSignaling = false; break;
14095 case 0x01: Pred = FCmpInst::FCMP_OLT; IsSignaling = true; break;
14096 case 0x02: Pred = FCmpInst::FCMP_OLE; IsSignaling = true; break;
14097 case 0x03: Pred = FCmpInst::FCMP_UNO; IsSignaling = false; break;
14098 case 0x04: Pred = FCmpInst::FCMP_UNE; IsSignaling = false; break;
14099 case 0x05: Pred = FCmpInst::FCMP_UGE; IsSignaling = true; break;
14100 case 0x06: Pred = FCmpInst::FCMP_UGT; IsSignaling = true; break;
14101 case 0x07: Pred = FCmpInst::FCMP_ORD; IsSignaling = false; break;
14102 case 0x08: Pred = FCmpInst::FCMP_UEQ; IsSignaling = false; break;
14103 case 0x09: Pred = FCmpInst::FCMP_ULT; IsSignaling = true; break;
14104 case 0x0a: Pred = FCmpInst::FCMP_ULE; IsSignaling = true; break;
14105 case 0x0b: Pred = FCmpInst::FCMP_FALSE; IsSignaling = false; break;
14106 case 0x0c: Pred = FCmpInst::FCMP_ONE; IsSignaling = false; break;
14107 case 0x0d: Pred = FCmpInst::FCMP_OGE; IsSignaling = true; break;
14108 case 0x0e: Pred = FCmpInst::FCMP_OGT; IsSignaling = true; break;
14109 case 0x0f: Pred = FCmpInst::FCMP_TRUE; IsSignaling = false; break;
14110 default: llvm_unreachable("Unhandled CC");
14111 }
14112
14113 // Invert the signalling behavior for 16-31.
14114 if (CC & 0x10)
14115 IsSignaling = !IsSignaling;
14116
14117    // If the predicate is true or false and we're using constrained
14118    // intrinsics, we don't have a compare intrinsic we can use; just use
14119    // the legacy X86-specific intrinsic. Likewise, if the builtin is
14120    // mask-enabled and we're using constrained intrinsics, use the legacy
14121    // X86-specific intrinsic.
14122 if (Builder.getIsFPConstrained() &&
14123 (Pred == FCmpInst::FCMP_TRUE || Pred == FCmpInst::FCMP_FALSE ||
14124 IsMaskFCmp)) {
14125
14126 Intrinsic::ID IID;
14127 switch (BuiltinID) {
14128 default: llvm_unreachable("Unexpected builtin");
14129 case X86::BI__builtin_ia32_cmpps:
14130 IID = Intrinsic::x86_sse_cmp_ps;
14131 break;
14132 case X86::BI__builtin_ia32_cmpps256:
14133 IID = Intrinsic::x86_avx_cmp_ps_256;
14134 break;
14135 case X86::BI__builtin_ia32_cmppd:
14136 IID = Intrinsic::x86_sse2_cmp_pd;
14137 break;
14138 case X86::BI__builtin_ia32_cmppd256:
14139 IID = Intrinsic::x86_avx_cmp_pd_256;
14140 break;
14141 case X86::BI__builtin_ia32_cmpps512_mask:
14142 IID = Intrinsic::x86_avx512_mask_cmp_ps_512;
14143 break;
14144 case X86::BI__builtin_ia32_cmppd512_mask:
14145 IID = Intrinsic::x86_avx512_mask_cmp_pd_512;
14146 break;
14147 case X86::BI__builtin_ia32_cmpps128_mask:
14148 IID = Intrinsic::x86_avx512_mask_cmp_ps_128;
14149 break;
14150 case X86::BI__builtin_ia32_cmpps256_mask:
14151 IID = Intrinsic::x86_avx512_mask_cmp_ps_256;
14152 break;
14153 case X86::BI__builtin_ia32_cmppd128_mask:
14154 IID = Intrinsic::x86_avx512_mask_cmp_pd_128;
14155 break;
14156 case X86::BI__builtin_ia32_cmppd256_mask:
14157 IID = Intrinsic::x86_avx512_mask_cmp_pd_256;
14158 break;
14159 }
14160
14161 Function *Intr = CGM.getIntrinsic(IID);
14162 if (IsMaskFCmp) {
14163 unsigned NumElts =
14164 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14165 Ops[3] = getMaskVecValue(*this, Ops[3], NumElts);
14166 Value *Cmp = Builder.CreateCall(Intr, Ops);
14167 return EmitX86MaskedCompareResult(*this, Cmp, NumElts, nullptr);
14168 }
14169
14170 return Builder.CreateCall(Intr, Ops);
14171 }
14172
14173    // Builtins without the _mask suffix return a vector of integers
14174    // of the same width as the input vectors.
14175 if (IsMaskFCmp) {
14176 // We ignore SAE if strict FP is disabled. We only keep precise
14177 // exception behavior under strict FP.
14178 unsigned NumElts =
14179 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14180 Value *Cmp;
14181 if (IsSignaling)
14182 Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
14183 else
14184 Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
14185 return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]);
14186 }
14187
14188 return getVectorFCmpIR(Pred, IsSignaling);
14189 }
14190
14191 // SSE scalar comparison intrinsics
14192 case X86::BI__builtin_ia32_cmpeqss:
14193 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 0);
14194 case X86::BI__builtin_ia32_cmpltss:
14195 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 1);
14196 case X86::BI__builtin_ia32_cmpless:
14197 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 2);
14198 case X86::BI__builtin_ia32_cmpunordss:
14199 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 3);
14200 case X86::BI__builtin_ia32_cmpneqss:
14201 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 4);
14202 case X86::BI__builtin_ia32_cmpnltss:
14203 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 5);
14204 case X86::BI__builtin_ia32_cmpnless:
14205 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 6);
14206 case X86::BI__builtin_ia32_cmpordss:
14207 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 7);
14208 case X86::BI__builtin_ia32_cmpeqsd:
14209 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 0);
14210 case X86::BI__builtin_ia32_cmpltsd:
14211 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 1);
14212 case X86::BI__builtin_ia32_cmplesd:
14213 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 2);
14214 case X86::BI__builtin_ia32_cmpunordsd:
14215 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 3);
14216 case X86::BI__builtin_ia32_cmpneqsd:
14217 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 4);
14218 case X86::BI__builtin_ia32_cmpnltsd:
14219 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 5);
14220 case X86::BI__builtin_ia32_cmpnlesd:
14221 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6);
14222 case X86::BI__builtin_ia32_cmpordsd:
14223 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7);
14224
14225 // f16c half2float intrinsics
14226 case X86::BI__builtin_ia32_vcvtph2ps:
14227 case X86::BI__builtin_ia32_vcvtph2ps256:
14228 case X86::BI__builtin_ia32_vcvtph2ps_mask:
14229 case X86::BI__builtin_ia32_vcvtph2ps256_mask:
14230 case X86::BI__builtin_ia32_vcvtph2ps512_mask:
14231 return EmitX86CvtF16ToFloatExpr(*this, Ops, ConvertType(E->getType()));
14232
14233  // AVX512 bf16 intrinsics
14234 case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
14235 Ops[2] = getMaskVecValue(
14236 *this, Ops[2],
14237 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements());
14238 Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128;
14239 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
14240 }
14241 case X86::BI__builtin_ia32_cvtsbf162ss_32:
14242 return EmitX86CvtBF16ToFloatExpr(*this, E, Ops);
14243
14244 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
14245 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: {
14246 Intrinsic::ID IID;
14247 switch (BuiltinID) {
14248 default: llvm_unreachable("Unsupported intrinsic!");
14249 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
14250 IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_256;
14251 break;
14252 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask:
14253 IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_512;
14254 break;
14255 }
14256 Value *Res = Builder.CreateCall(CGM.getIntrinsic(IID), Ops[0]);
14257 return EmitX86Select(*this, Ops[2], Res, Ops[1]);
14258 }
14259
14260 case X86::BI__emul:
14261 case X86::BI__emulu: {
14262 llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64);
14263 bool isSigned = (BuiltinID == X86::BI__emul);
14264 Value *LHS = Builder.CreateIntCast(Ops[0], Int64Ty, isSigned);
14265 Value *RHS = Builder.CreateIntCast(Ops[1], Int64Ty, isSigned);
14266 return Builder.CreateMul(LHS, RHS, "", !isSigned, isSigned);
14267 }
14268 case X86::BI__mulh:
14269 case X86::BI__umulh:
14270 case X86::BI_mul128:
14271 case X86::BI_umul128: {
14272 llvm::Type *ResType = ConvertType(E->getType());
14273 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
14274
14275 bool IsSigned = (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI_mul128);
14276 Value *LHS = Builder.CreateIntCast(Ops[0], Int128Ty, IsSigned);
14277 Value *RHS = Builder.CreateIntCast(Ops[1], Int128Ty, IsSigned);
14278
14279 Value *MulResult, *HigherBits;
14280 if (IsSigned) {
14281 MulResult = Builder.CreateNSWMul(LHS, RHS);
14282 HigherBits = Builder.CreateAShr(MulResult, 64);
14283 } else {
14284 MulResult = Builder.CreateNUWMul(LHS, RHS);
14285 HigherBits = Builder.CreateLShr(MulResult, 64);
14286 }
14287 HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);
14288
14289 if (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI__umulh)
14290 return HigherBits;
14291
14292 Address HighBitsAddress = EmitPointerWithAlignment(E->getArg(2));
14293 Builder.CreateStore(HigherBits, HighBitsAddress);
14294 return Builder.CreateIntCast(MulResult, ResType, IsSigned);
14295 }
14296
14297 case X86::BI__faststorefence: {
14298 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
14299 llvm::SyncScope::System);
14300 }
14301 case X86::BI__shiftleft128:
14302 case X86::BI__shiftright128: {
14303 llvm::Function *F = CGM.getIntrinsic(
14304 BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr,
14305 Int64Ty);
14306 // Flip low/high ops and zero-extend amount to matching type.
14307 // shiftleft128(Low, High, Amt) -> fshl(High, Low, Amt)
14308 // shiftright128(Low, High, Amt) -> fshr(High, Low, Amt)
14309 std::swap(Ops[0], Ops[1]);
14310 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
14311 return Builder.CreateCall(F, Ops);
14312 }
14313 case X86::BI_ReadWriteBarrier:
14314 case X86::BI_ReadBarrier:
14315 case X86::BI_WriteBarrier: {
14316 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
14317 llvm::SyncScope::SingleThread);
14318 }
14319
14320 case X86::BI_AddressOfReturnAddress: {
14321 Function *F =
14322 CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
14323 return Builder.CreateCall(F);
14324 }
14325 case X86::BI__stosb: {
14326    // We treat __stosb as a volatile memset - it may not generate a "rep stosb"
14327 // instruction, but it will create a memset that won't be optimized away.
14328 return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], Align(1), true);
14329 }
14330 case X86::BI__ud2:
14331    // llvm.trap emits a ud2a instruction on x86.
14332 return EmitTrapCall(Intrinsic::trap);
14333 case X86::BI__int2c: {
14334 // This syscall signals a driver assertion failure in x86 NT kernels.
14335 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
14336 llvm::InlineAsm *IA =
14337 llvm::InlineAsm::get(FTy, "int $$0x2c", "", /*hasSideEffects=*/true);
14338 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
14339 getLLVMContext(), llvm::AttributeList::FunctionIndex,
14340 llvm::Attribute::NoReturn);
14341 llvm::CallInst *CI = Builder.CreateCall(IA);
14342 CI->setAttributes(NoReturnAttr);
14343 return CI;
14344 }
14345 case X86::BI__readfsbyte:
14346 case X86::BI__readfsword:
14347 case X86::BI__readfsdword:
14348 case X86::BI__readfsqword: {
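    // On x86, LLVM address space 257 corresponds to the FS segment.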
14349 llvm::Type *IntTy = ConvertType(E->getType());
14350 Value *Ptr =
14351 Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 257));
14352 LoadInst *Load = Builder.CreateAlignedLoad(
14353 IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
14354 Load->setVolatile(true);
14355 return Load;
14356 }
14357 case X86::BI__readgsbyte:
14358 case X86::BI__readgsword:
14359 case X86::BI__readgsdword:
14360 case X86::BI__readgsqword: {
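    // On x86, LLVM address space 256 corresponds to the GS segment.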
14361 llvm::Type *IntTy = ConvertType(E->getType());
14362 Value *Ptr =
14363 Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 256));
14364 LoadInst *Load = Builder.CreateAlignedLoad(
14365 IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
14366 Load->setVolatile(true);
14367 return Load;
14368 }
14369 case X86::BI__builtin_ia32_paddsb512:
14370 case X86::BI__builtin_ia32_paddsw512:
14371 case X86::BI__builtin_ia32_paddsb256:
14372 case X86::BI__builtin_ia32_paddsw256:
14373 case X86::BI__builtin_ia32_paddsb128:
14374 case X86::BI__builtin_ia32_paddsw128:
14375 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::sadd_sat);
14376 case X86::BI__builtin_ia32_paddusb512:
14377 case X86::BI__builtin_ia32_paddusw512:
14378 case X86::BI__builtin_ia32_paddusb256:
14379 case X86::BI__builtin_ia32_paddusw256:
14380 case X86::BI__builtin_ia32_paddusb128:
14381 case X86::BI__builtin_ia32_paddusw128:
14382 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::uadd_sat);
14383 case X86::BI__builtin_ia32_psubsb512:
14384 case X86::BI__builtin_ia32_psubsw512:
14385 case X86::BI__builtin_ia32_psubsb256:
14386 case X86::BI__builtin_ia32_psubsw256:
14387 case X86::BI__builtin_ia32_psubsb128:
14388 case X86::BI__builtin_ia32_psubsw128:
14389 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::ssub_sat);
14390 case X86::BI__builtin_ia32_psubusb512:
14391 case X86::BI__builtin_ia32_psubusw512:
14392 case X86::BI__builtin_ia32_psubusb256:
14393 case X86::BI__builtin_ia32_psubusw256:
14394 case X86::BI__builtin_ia32_psubusb128:
14395 case X86::BI__builtin_ia32_psubusw128:
14396 return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::usub_sat);
14397 case X86::BI__builtin_ia32_encodekey128_u32: {
14398 Intrinsic::ID IID = Intrinsic::x86_encodekey128;
14399
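    // The intrinsic returns an i32 plus six 128-bit chunks of the key handle;
    // store the chunks 16 bytes apart through Ops[2] and return the i32.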
14400 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1]});
14401
14402 for (int i = 0; i < 6; ++i) {
14403 Value *Extract = Builder.CreateExtractValue(Call, i + 1);
14404 Value *Ptr = Builder.CreateConstGEP1_32(Ops[2], i * 16);
14405 Ptr = Builder.CreateBitCast(
14406 Ptr, llvm::PointerType::getUnqual(Extract->getType()));
14407 Builder.CreateAlignedStore(Extract, Ptr, Align(1));
14408 }
14409
14410 return Builder.CreateExtractValue(Call, 0);
14411 }
14412 case X86::BI__builtin_ia32_encodekey256_u32: {
14413 Intrinsic::ID IID = Intrinsic::x86_encodekey256;
14414
14415 Value *Call =
14416 Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1], Ops[2]});
14417
14418 for (int i = 0; i < 7; ++i) {
14419 Value *Extract = Builder.CreateExtractValue(Call, i + 1);
14420 Value *Ptr = Builder.CreateConstGEP1_32(Ops[3], i * 16);
14421 Ptr = Builder.CreateBitCast(
14422 Ptr, llvm::PointerType::getUnqual(Extract->getType()));
14423 Builder.CreateAlignedStore(Extract, Ptr, Align(1));
14424 }
14425
14426 return Builder.CreateExtractValue(Call, 0);
14427 }
14428 case X86::BI__builtin_ia32_aesenc128kl_u8:
14429 case X86::BI__builtin_ia32_aesdec128kl_u8:
14430 case X86::BI__builtin_ia32_aesenc256kl_u8:
14431 case X86::BI__builtin_ia32_aesdec256kl_u8: {
14432 Intrinsic::ID IID;
14433 switch (BuiltinID) {
14434 default: llvm_unreachable("Unexpected builtin");
14435 case X86::BI__builtin_ia32_aesenc128kl_u8:
14436 IID = Intrinsic::x86_aesenc128kl;
14437 break;
14438 case X86::BI__builtin_ia32_aesdec128kl_u8:
14439 IID = Intrinsic::x86_aesdec128kl;
14440 break;
14441 case X86::BI__builtin_ia32_aesenc256kl_u8:
14442 IID = Intrinsic::x86_aesenc256kl;
14443 break;
14444 case X86::BI__builtin_ia32_aesdec256kl_u8:
14445 IID = Intrinsic::x86_aesdec256kl;
14446 break;
14447 }
14448
14449 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[1], Ops[2]});
14450
14451 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
14452 Ops[0]);
14453
14454 return Builder.CreateExtractValue(Call, 0);
14455 }
14456 case X86::BI__builtin_ia32_aesencwide128kl_u8:
14457 case X86::BI__builtin_ia32_aesdecwide128kl_u8:
14458 case X86::BI__builtin_ia32_aesencwide256kl_u8:
14459 case X86::BI__builtin_ia32_aesdecwide256kl_u8: {
14460 Intrinsic::ID IID;
14461 switch (BuiltinID) {
14462 case X86::BI__builtin_ia32_aesencwide128kl_u8:
14463 IID = Intrinsic::x86_aesencwide128kl;
14464 break;
14465 case X86::BI__builtin_ia32_aesdecwide128kl_u8:
14466 IID = Intrinsic::x86_aesdecwide128kl;
14467 break;
14468 case X86::BI__builtin_ia32_aesencwide256kl_u8:
14469 IID = Intrinsic::x86_aesencwide256kl;
14470 break;
14471 case X86::BI__builtin_ia32_aesdecwide256kl_u8:
14472 IID = Intrinsic::x86_aesdecwide256kl;
14473 break;
14474 }
14475
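    // Build the intrinsic operands: the key handle followed by eight 128-bit
    // blocks loaded from Ops[1]. The processed blocks are stored back through
    // Ops[0], and the status value is returned.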
14476 Value *InOps[9];
14477 InOps[0] = Ops[2];
14478 for (int i = 0; i != 8; ++i) {
14479 Value *Ptr = Builder.CreateConstGEP1_32(Ops[1], i);
14480 InOps[i + 1] = Builder.CreateAlignedLoad(Ptr, Align(16));
14481 }
14482
14483 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), InOps);
14484
14485 for (int i = 0; i != 8; ++i) {
14486 Value *Extract = Builder.CreateExtractValue(Call, i + 1);
14487 Value *Ptr = Builder.CreateConstGEP1_32(Ops[0], i);
14488 Builder.CreateAlignedStore(Extract, Ptr, Align(16));
14489 }
14490
14491 return Builder.CreateExtractValue(Call, 0);
14492 }
14493 }
14494}
14495
14496Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
14497 const CallExpr *E) {
14498 SmallVector<Value*, 4> Ops;
14499
14500 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
14501 Ops.push_back(EmitScalarExpr(E->getArg(i)));
14502
14503 Intrinsic::ID ID = Intrinsic::not_intrinsic;
14504
14505 switch (BuiltinID) {
14506 default: return nullptr;
14507
14508 // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we
14509 // call __builtin_readcyclecounter.
14510 case PPC::BI__builtin_ppc_get_timebase:
14511 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter));
14512
14513 // vec_ld, vec_xl_be, vec_lvsl, vec_lvsr
14514 case PPC::BI__builtin_altivec_lvx:
14515 case PPC::BI__builtin_altivec_lvxl:
14516 case PPC::BI__builtin_altivec_lvebx:
14517 case PPC::BI__builtin_altivec_lvehx:
14518 case PPC::BI__builtin_altivec_lvewx:
14519 case PPC::BI__builtin_altivec_lvsl:
14520 case PPC::BI__builtin_altivec_lvsr:
14521 case PPC::BI__builtin_vsx_lxvd2x:
14522 case PPC::BI__builtin_vsx_lxvw4x:
14523 case PPC::BI__builtin_vsx_lxvd2x_be:
14524 case PPC::BI__builtin_vsx_lxvw4x_be:
14525 case PPC::BI__builtin_vsx_lxvl:
14526 case PPC::BI__builtin_vsx_lxvll:
14527 {
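    // For lxvl/lxvll the pointer is the first operand; the other builtins
    // take (offset, pointer) and we fold them into a single address here.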
14528    if (BuiltinID == PPC::BI__builtin_vsx_lxvl ||
14529        BuiltinID == PPC::BI__builtin_vsx_lxvll) {
14530      Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
14531    } else {
14532      Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
14533      Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
14534      Ops.pop_back();
14535    }
14536
14537 switch (BuiltinID) {
14538 default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
14539 case PPC::BI__builtin_altivec_lvx:
14540 ID = Intrinsic::ppc_altivec_lvx;
14541 break;
14542 case PPC::BI__builtin_altivec_lvxl:
14543 ID = Intrinsic::ppc_altivec_lvxl;
14544 break;
14545 case PPC::BI__builtin_altivec_lvebx:
14546 ID = Intrinsic::ppc_altivec_lvebx;
14547 break;
14548 case PPC::BI__builtin_altivec_lvehx:
14549 ID = Intrinsic::ppc_altivec_lvehx;
14550 break;
14551 case PPC::BI__builtin_altivec_lvewx:
14552 ID = Intrinsic::ppc_altivec_lvewx;
14553 break;
14554 case PPC::BI__builtin_altivec_lvsl:
14555 ID = Intrinsic::ppc_altivec_lvsl;
14556 break;
14557 case PPC::BI__builtin_altivec_lvsr:
14558 ID = Intrinsic::ppc_altivec_lvsr;
14559 break;
14560 case PPC::BI__builtin_vsx_lxvd2x:
14561 ID = Intrinsic::ppc_vsx_lxvd2x;
14562 break;
14563 case PPC::BI__builtin_vsx_lxvw4x:
14564 ID = Intrinsic::ppc_vsx_lxvw4x;
14565 break;
14566 case PPC::BI__builtin_vsx_lxvd2x_be:
14567 ID = Intrinsic::ppc_vsx_lxvd2x_be;
14568 break;
14569 case PPC::BI__builtin_vsx_lxvw4x_be:
14570 ID = Intrinsic::ppc_vsx_lxvw4x_be;
14571 break;
14572 case PPC::BI__builtin_vsx_lxvl:
14573 ID = Intrinsic::ppc_vsx_lxvl;
14574 break;
14575 case PPC::BI__builtin_vsx_lxvll:
14576 ID = Intrinsic::ppc_vsx_lxvll;
14577 break;
14578 }
14579 llvm::Function *F = CGM.getIntrinsic(ID);
14580 return Builder.CreateCall(F, Ops, "");
14581 }
14582
14583 // vec_st, vec_xst_be
14584 case PPC::BI__builtin_altivec_stvx:
14585 case PPC::BI__builtin_altivec_stvxl:
14586 case PPC::BI__builtin_altivec_stvebx:
14587 case PPC::BI__builtin_altivec_stvehx:
14588 case PPC::BI__builtin_altivec_stvewx:
14589 case PPC::BI__builtin_vsx_stxvd2x:
14590 case PPC::BI__builtin_vsx_stxvw4x:
14591 case PPC::BI__builtin_vsx_stxvd2x_be:
14592 case PPC::BI__builtin_vsx_stxvw4x_be:
14593 case PPC::BI__builtin_vsx_stxvl:
14594 case PPC::BI__builtin_vsx_stxvll:
14595 {
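    // For stxvl/stxvll the pointer is the second operand; the other builtins
    // take (value, offset, pointer) and we fold offset and pointer into a
    // single address here.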
14596    if (BuiltinID == PPC::BI__builtin_vsx_stxvl ||
14597        BuiltinID == PPC::BI__builtin_vsx_stxvll) {
14598      Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
14599    } else {
14600      Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
14601      Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
14602      Ops.pop_back();
14603    }
14604
14605 switch (BuiltinID) {
14606 default: llvm_unreachable("Unsupported st intrinsic!");
14607 case PPC::BI__builtin_altivec_stvx:
14608 ID = Intrinsic::ppc_altivec_stvx;
14609 break;
14610 case PPC::BI__builtin_altivec_stvxl:
14611 ID = Intrinsic::ppc_altivec_stvxl;
14612 break;
14613 case PPC::BI__builtin_altivec_stvebx:
14614 ID = Intrinsic::ppc_altivec_stvebx;
14615 break;
14616 case PPC::BI__builtin_altivec_stvehx:
14617 ID = Intrinsic::ppc_altivec_stvehx;
14618 break;
14619 case PPC::BI__builtin_altivec_stvewx:
14620 ID = Intrinsic::ppc_altivec_stvewx;
14621 break;
14622 case PPC::BI__builtin_vsx_stxvd2x:
14623 ID = Intrinsic::ppc_vsx_stxvd2x;
14624 break;
14625 case PPC::BI__builtin_vsx_stxvw4x:
14626 ID = Intrinsic::ppc_vsx_stxvw4x;
14627 break;
14628 case PPC::BI__builtin_vsx_stxvd2x_be:
14629 ID = Intrinsic::ppc_vsx_stxvd2x_be;
14630 break;
14631 case PPC::BI__builtin_vsx_stxvw4x_be:
14632 ID = Intrinsic::ppc_vsx_stxvw4x_be;
14633 break;
14634 case PPC::BI__builtin_vsx_stxvl:
14635 ID = Intrinsic::ppc_vsx_stxvl;
14636 break;
14637 case PPC::BI__builtin_vsx_stxvll:
14638 ID = Intrinsic::ppc_vsx_stxvll;
14639 break;
14640 }
14641 llvm::Function *F = CGM.getIntrinsic(ID);
14642 return Builder.CreateCall(F, Ops, "");
14643 }
14644 // Square root
14645 case PPC::BI__builtin_vsx_xvsqrtsp:
14646 case PPC::BI__builtin_vsx_xvsqrtdp: {
14647 llvm::Type *ResultType = ConvertType(E->getType());
14648 Value *X = EmitScalarExpr(E->getArg(0));
14649 if (Builder.getIsFPConstrained()) {
14650 llvm::Function *F = CGM.getIntrinsic(
14651 Intrinsic::experimental_constrained_sqrt, ResultType);
14652 return Builder.CreateConstrainedFPCall(F, X);
14653 } else {
14654 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
14655 return Builder.CreateCall(F, X);
14656 }
14657 }
14658 // Count leading zeros
14659 case PPC::BI__builtin_altivec_vclzb:
14660 case PPC::BI__builtin_altivec_vclzh:
14661 case PPC::BI__builtin_altivec_vclzw:
14662 case PPC::BI__builtin_altivec_vclzd: {
14663 llvm::Type *ResultType = ConvertType(E->getType());
14664 Value *X = EmitScalarExpr(E->getArg(0));
14665 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
14666 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
14667 return Builder.CreateCall(F, {X, Undef});
14668 }
14669 case PPC::BI__builtin_altivec_vctzb:
14670 case PPC::BI__builtin_altivec_vctzh:
14671 case PPC::BI__builtin_altivec_vctzw:
14672 case PPC::BI__builtin_altivec_vctzd: {
14673 llvm::Type *ResultType = ConvertType(E->getType());
14674 Value *X = EmitScalarExpr(E->getArg(0));
14675 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
14676 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
14677 return Builder.CreateCall(F, {X, Undef});
14678 }
14679 case PPC::BI__builtin_altivec_vec_replace_elt:
14680 case PPC::BI__builtin_altivec_vec_replace_unaligned: {
    // The third argument of vec_replace_elt and vec_replace_unaligned must
    // be a compile-time constant and will be emitted as either the vinsw
    // or the vinsd instruction.
14684 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
14685 assert(ArgCI &&
14686 "Third Arg to vinsw/vinsd intrinsic must be a constant integer!");
14687 llvm::Type *ResultType = ConvertType(E->getType());
14688 llvm::Function *F = nullptr;
14689 Value *Call = nullptr;
14690 int64_t ConstArg = ArgCI->getSExtValue();
14691 unsigned ArgWidth = Ops[1]->getType()->getPrimitiveSizeInBits();
14692 bool Is32Bit = false;
14693 assert((ArgWidth == 32 || ArgWidth == 64) && "Invalid argument width");
14694 // The input to vec_replace_elt is an element index, not a byte index.
14695 if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt)
14696 ConstArg *= ArgWidth / 8;
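    // e.g., with 32-bit elements, element index 2 becomes byte index 8.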
14697 if (ArgWidth == 32) {
14698 Is32Bit = true;
14699 // When the second argument is 32 bits, it can either be an integer or
14700 // a float. The vinsw intrinsic is used in this case.
14701 F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsw);
      // Fix the constant according to endianness.
14703 if (getTarget().isLittleEndian())
14704 ConstArg = 12 - ConstArg;
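      // e.g., word byte-offset 0 on BE corresponds to offset 12 on LE, since
      // the offsets mirror within the 16-byte vector.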
14705 } else {
14706 // When the second argument is 64 bits, it can either be a long long or
14707 // a double. The vinsd intrinsic is used in this case.
14708 F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsd);
14709 // Fix the constant for little endian.
14710 if (getTarget().isLittleEndian())
14711 ConstArg = 8 - ConstArg;
14712 }
14713 Ops[2] = ConstantInt::getSigned(Int32Ty, ConstArg);
    // Depending on ArgWidth, the input vector could hold floats or doubles.
    // If the inputs are not already integers, bitcast them: float inputs to
    // 32-bit integers and double inputs to 64-bit integers.
14717 if (!Ops[1]->getType()->isIntegerTy(ArgWidth)) {
14718 Ops[0] = Builder.CreateBitCast(
14719 Ops[0], Is32Bit ? llvm::FixedVectorType::get(Int32Ty, 4)
14720 : llvm::FixedVectorType::get(Int64Ty, 2));
14721 Ops[1] = Builder.CreateBitCast(Ops[1], Is32Bit ? Int32Ty : Int64Ty);
14722 }
14723 // Emit the call to vinsw or vinsd.
14724 Call = Builder.CreateCall(F, Ops);
    // Depending on the builtin, bitcast to the appropriate result type.
14726 if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt &&
14727 !Ops[1]->getType()->isIntegerTy())
14728 return Builder.CreateBitCast(Call, ResultType);
14729 else if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt &&
14730 Ops[1]->getType()->isIntegerTy())
14731 return Call;
14732 else
14733 return Builder.CreateBitCast(Call,
14734 llvm::FixedVectorType::get(Int8Ty, 16));
14735 }
14736 case PPC::BI__builtin_altivec_vpopcntb:
14737 case PPC::BI__builtin_altivec_vpopcnth:
14738 case PPC::BI__builtin_altivec_vpopcntw:
14739 case PPC::BI__builtin_altivec_vpopcntd: {
14740 llvm::Type *ResultType = ConvertType(E->getType());
14741 Value *X = EmitScalarExpr(E->getArg(0));
14742 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
14743 return Builder.CreateCall(F, X);
14744 }
14745 // Copy sign
14746 case PPC::BI__builtin_vsx_xvcpsgnsp:
14747 case PPC::BI__builtin_vsx_xvcpsgndp: {
14748 llvm::Type *ResultType = ConvertType(E->getType());
14749 Value *X = EmitScalarExpr(E->getArg(0));
14750 Value *Y = EmitScalarExpr(E->getArg(1));
14751 ID = Intrinsic::copysign;
14752 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
14753 return Builder.CreateCall(F, {X, Y});
14754 }
14755 // Rounding/truncation
14756 case PPC::BI__builtin_vsx_xvrspip:
14757 case PPC::BI__builtin_vsx_xvrdpip:
14758 case PPC::BI__builtin_vsx_xvrdpim:
14759 case PPC::BI__builtin_vsx_xvrspim:
14760 case PPC::BI__builtin_vsx_xvrdpi:
14761 case PPC::BI__builtin_vsx_xvrspi:
14762 case PPC::BI__builtin_vsx_xvrdpic:
14763 case PPC::BI__builtin_vsx_xvrspic:
14764 case PPC::BI__builtin_vsx_xvrdpiz:
14765 case PPC::BI__builtin_vsx_xvrspiz: {
14766 llvm::Type *ResultType = ConvertType(E->getType());
14767 Value *X = EmitScalarExpr(E->getArg(0));
14768 if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim ||
14769 BuiltinID == PPC::BI__builtin_vsx_xvrspim)
14770 ID = Builder.getIsFPConstrained()
14771 ? Intrinsic::experimental_constrained_floor
14772 : Intrinsic::floor;
14773 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi ||
14774 BuiltinID == PPC::BI__builtin_vsx_xvrspi)
14775 ID = Builder.getIsFPConstrained()
14776 ? Intrinsic::experimental_constrained_round
14777 : Intrinsic::round;
14778 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
14779 BuiltinID == PPC::BI__builtin_vsx_xvrspic)
14780 ID = Builder.getIsFPConstrained()
14781 ? Intrinsic::experimental_constrained_rint
14782 : Intrinsic::rint;
14783 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
14784 BuiltinID == PPC::BI__builtin_vsx_xvrspip)
14785 ID = Builder.getIsFPConstrained()
14786 ? Intrinsic::experimental_constrained_ceil
14787 : Intrinsic::ceil;
14788 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
14789 BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
14790 ID = Builder.getIsFPConstrained()
14791 ? Intrinsic::experimental_constrained_trunc
14792 : Intrinsic::trunc;
14793 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
14794 return Builder.getIsFPConstrained() ? Builder.CreateConstrainedFPCall(F, X)
14795 : Builder.CreateCall(F, X);
14796 }
14797
14798 // Absolute value
14799 case PPC::BI__builtin_vsx_xvabsdp:
14800 case PPC::BI__builtin_vsx_xvabssp: {
14801 llvm::Type *ResultType = ConvertType(E->getType());
14802 Value *X = EmitScalarExpr(E->getArg(0));
14803 llvm::Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
14804 return Builder.CreateCall(F, X);
14805 }
14806
14807 // FMA variations
14808 case PPC::BI__builtin_vsx_xvmaddadp:
14809 case PPC::BI__builtin_vsx_xvmaddasp:
14810 case PPC::BI__builtin_vsx_xvnmaddadp:
14811 case PPC::BI__builtin_vsx_xvnmaddasp:
14812 case PPC::BI__builtin_vsx_xvmsubadp:
14813 case PPC::BI__builtin_vsx_xvmsubasp:
14814 case PPC::BI__builtin_vsx_xvnmsubadp:
14815 case PPC::BI__builtin_vsx_xvnmsubasp: {
14816 llvm::Type *ResultType = ConvertType(E->getType());
14817 Value *X = EmitScalarExpr(E->getArg(0));
14818 Value *Y = EmitScalarExpr(E->getArg(1));
14819 Value *Z = EmitScalarExpr(E->getArg(2));
14820 llvm::Function *F;
14821 if (Builder.getIsFPConstrained())
14822 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
14823 else
14824 F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
14825 switch (BuiltinID) {
14826 case PPC::BI__builtin_vsx_xvmaddadp:
14827 case PPC::BI__builtin_vsx_xvmaddasp:
14828 if (Builder.getIsFPConstrained())
14829 return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
14830 else
14831 return Builder.CreateCall(F, {X, Y, Z});
14832 case PPC::BI__builtin_vsx_xvnmaddadp:
14833 case PPC::BI__builtin_vsx_xvnmaddasp:
14834 if (Builder.getIsFPConstrained())
14835 return Builder.CreateFNeg(
14836 Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
14837 else
14838 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
14839 case PPC::BI__builtin_vsx_xvmsubadp:
14840 case PPC::BI__builtin_vsx_xvmsubasp:
14841 if (Builder.getIsFPConstrained())
14842 return Builder.CreateConstrainedFPCall(
14843 F, {X, Y, Builder.CreateFNeg(Z, "neg")});
14844 else
14845 return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
14846 case PPC::BI__builtin_vsx_xvnmsubadp:
14847 case PPC::BI__builtin_vsx_xvnmsubasp:
14848 if (Builder.getIsFPConstrained())
14849 return Builder.CreateFNeg(
14850 Builder.CreateConstrainedFPCall(
14851 F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
14852 "neg");
14853 else
14854 return Builder.CreateFNeg(
14855 Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
14856 "neg");
14857 }
14858 llvm_unreachable("Unknown FMA operation");
14859 return nullptr; // Suppress no-return warning
14860 }
14861
14862 case PPC::BI__builtin_vsx_insertword: {
14863 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxinsertw);
14864
    // The third argument is a compile-time constant int. It must be clamped
    // to the range [0, 12].
14867 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
14868 assert(ArgCI &&
14869 "Third arg to xxinsertw intrinsic must be constant integer");
14870 const int64_t MaxIndex = 12;
14871 int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
14872
    // The builtin semantics don't exactly match the xxinsertw instruction's
    // semantics (which ppc_vsx_xxinsertw follows). The builtin extracts the
    // word from the first argument and inserts it into the second argument.
    // The instruction extracts the word from its second input register and
    // inserts it into its first input register, so swap the first and second
    // arguments.
14878 std::swap(Ops[0], Ops[1]);
14879
14880 // Need to cast the second argument from a vector of unsigned int to a
14881 // vector of long long.
14882 Ops[1] =
14883 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
14884
14885 if (getTarget().isLittleEndian()) {
14886 // Reverse the double words in the vector we will extract from.
14887 Ops[0] =
14888 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
14889 Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ArrayRef<int>{1, 0});
14890
14891 // Reverse the index.
14892 Index = MaxIndex - Index;
14893 }
14894
14895 // Intrinsic expects the first arg to be a vector of int.
14896 Ops[0] =
14897 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
14898 Ops[2] = ConstantInt::getSigned(Int32Ty, Index);
14899 return Builder.CreateCall(F, Ops);
14900 }
14901
14902 case PPC::BI__builtin_vsx_extractuword: {
14903 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw);
14904
14905 // Intrinsic expects the first argument to be a vector of doublewords.
14906 Ops[0] =
14907 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
14908
14909 // The second argument is a compile time constant int that needs to
14910 // be clamped to the range [0, 12].
14911 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[1]);
14912 assert(ArgCI &&
14913 "Second Arg to xxextractuw intrinsic must be a constant integer!");
14914 const int64_t MaxIndex = 12;
14915 int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
14916
14917 if (getTarget().isLittleEndian()) {
14918 // Reverse the index.
14919 Index = MaxIndex - Index;
14920 Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
14921
14922 // Emit the call, then reverse the double words of the results vector.
14923 Value *Call = Builder.CreateCall(F, Ops);
14924
14925 Value *ShuffleCall =
14926 Builder.CreateShuffleVector(Call, Call, ArrayRef<int>{1, 0});
14927 return ShuffleCall;
14928 } else {
14929 Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
14930 return Builder.CreateCall(F, Ops);
14931 }
14932 }
14933
14934 case PPC::BI__builtin_vsx_xxpermdi: {
14935 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
14936 assert(ArgCI && "Third arg must be constant integer!");
14937
14938 unsigned Index = ArgCI->getZExtValue();
14939 Ops[0] =
14940 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
14941 Ops[1] =
14942 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
14943
14944 // Account for endianness by treating this as just a shuffle. So we use the
14945 // same indices for both LE and BE in order to produce expected results in
14946 // both cases.
14947 int ElemIdx0 = (Index & 2) >> 1;
14948 int ElemIdx1 = 2 + (Index & 1);
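    // e.g., Index = 0 produces the mask {0, 2} and Index = 3 produces {1, 3}.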
14949
14950 int ShuffleElts[2] = {ElemIdx0, ElemIdx1};
14951 Value *ShuffleCall =
14952 Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
14953 QualType BIRetType = E->getType();
14954 auto RetTy = ConvertType(BIRetType);
14955 return Builder.CreateBitCast(ShuffleCall, RetTy);
14956 }
14957
14958 case PPC::BI__builtin_vsx_xxsldwi: {
14959 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
14960 assert(ArgCI && "Third argument must be a compile time constant");
14961 unsigned Index = ArgCI->getZExtValue() & 0x3;
14962 Ops[0] =
14963 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
14964 Ops[1] =
14965 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int32Ty, 4));
14966
14967 // Create a shuffle mask
14968 int ElemIdx0;
14969 int ElemIdx1;
14970 int ElemIdx2;
14971 int ElemIdx3;
14972 if (getTarget().isLittleEndian()) {
14973 // Little endian element N comes from element 8+N-Index of the
14974 // concatenated wide vector (of course, using modulo arithmetic on
14975 // the total number of elements).
14976 ElemIdx0 = (8 - Index) % 8;
14977 ElemIdx1 = (9 - Index) % 8;
14978 ElemIdx2 = (10 - Index) % 8;
14979 ElemIdx3 = (11 - Index) % 8;
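      // e.g., Index = 1 yields the mask {7, 0, 1, 2}.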
14980 } else {
14981 // Big endian ElemIdx<N> = Index + N
14982 ElemIdx0 = Index;
14983 ElemIdx1 = Index + 1;
14984 ElemIdx2 = Index + 2;
14985 ElemIdx3 = Index + 3;
14986 }
14987
14988 int ShuffleElts[4] = {ElemIdx0, ElemIdx1, ElemIdx2, ElemIdx3};
14989 Value *ShuffleCall =
14990 Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
14991 QualType BIRetType = E->getType();
14992 auto RetTy = ConvertType(BIRetType);
14993 return Builder.CreateBitCast(ShuffleCall, RetTy);
14994 }
14995
14996 case PPC::BI__builtin_pack_vector_int128: {
14997 bool isLittleEndian = getTarget().isLittleEndian();
14998 Value *UndefValue =
14999 llvm::UndefValue::get(llvm::FixedVectorType::get(Ops[0]->getType(), 2));
15000 Value *Res = Builder.CreateInsertElement(
15001 UndefValue, Ops[0], (uint64_t)(isLittleEndian ? 1 : 0));
15002 Res = Builder.CreateInsertElement(Res, Ops[1],
15003 (uint64_t)(isLittleEndian ? 0 : 1));
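    // Ops[0] thus supplies the most-significant doubleword of the i128:
    // element 1 in LE element order, element 0 in BE order.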
15004 return Builder.CreateBitCast(Res, ConvertType(E->getType()));
15005 }
15006
15007 case PPC::BI__builtin_unpack_vector_int128: {
15008 ConstantInt *Index = cast<ConstantInt>(Ops[1]);
15009 Value *Unpacked = Builder.CreateBitCast(
15010 Ops[0], llvm::FixedVectorType::get(ConvertType(E->getType()), 2));
15011
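    // Builtin index 0 names the most-significant doubleword; on little endian
    // that is vector element 1, hence the 1 - Index adjustment below.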
15012 if (getTarget().isLittleEndian())
15013 Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue());
15014
15015 return Builder.CreateExtractElement(Unpacked, Index);
15016 }
15017
15018 // The PPC MMA builtins take a pointer to a __vector_quad as an argument.
15019 // Some of the MMA instructions accumulate their result into an existing
15020 // accumulator whereas the others generate a new accumulator. So we need to
15021 // use custom code generation to expand a builtin call with a pointer to a
15022 // load (if the corresponding instruction accumulates its result) followed by
15023 // the call to the intrinsic and a store of the result.
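// For example, a call to an accumulating builtin conceptually becomes:
//   Acc = load(AccPtr); Acc = <intrinsic>(Acc, ...); store(Acc, AccPtr);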
15024#define CUSTOM_BUILTIN(Name, Types, Accumulate) \
15025 case PPC::BI__builtin_##Name:
15026#include "clang/Basic/BuiltinsPPC.def"
15027 {
  // The first argument of these two builtins is a pointer used to store their
  // result. However, the LLVM intrinsics return their result in multiple
  // return values. So here we emit code that extracts those values from the
  // intrinsic results and stores them through that pointer.
15032 if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc ||
15033 BuiltinID == PPC::BI__builtin_vsx_disassemble_pair) {
15034 unsigned NumVecs = 2;
15035 auto Intrinsic = Intrinsic::ppc_vsx_disassemble_pair;
15036 if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc) {
15037 NumVecs = 4;
15038 Intrinsic = Intrinsic::ppc_mma_disassemble_acc;
15039 }
15040 llvm::Function *F = CGM.getIntrinsic(Intrinsic);
15041 Address Addr = EmitPointerWithAlignment(E->getArg(1));
15042 Value *Vec = Builder.CreateLoad(Addr);
15043 Value *Call = Builder.CreateCall(F, {Vec});
15044 llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, 16);
15045 Value *Ptr = Builder.CreateBitCast(Ops[0], VTy->getPointerTo());
    for (unsigned i = 0; i < NumVecs; i++) {
      Value *Vec = Builder.CreateExtractValue(Call, i);
      llvm::ConstantInt *Index = llvm::ConstantInt::get(IntTy, i);
15049 Value *GEP = Builder.CreateInBoundsGEP(Ptr, Index);
15050 Builder.CreateAlignedStore(Vec, GEP, MaybeAlign(16));
15051 }
15052 return Call;
15053 }
15054 bool Accumulate;
15055 switch (BuiltinID) {
15056 #define CUSTOM_BUILTIN(Name, Types, Acc) \
15057 case PPC::BI__builtin_##Name: \
15058 ID = Intrinsic::ppc_##Name; \
15059 Accumulate = Acc; \
15060 break;
15061 #include "clang/Basic/BuiltinsPPC.def"
15062 }
15063 if (BuiltinID == PPC::BI__builtin_vsx_lxvp ||
15064 BuiltinID == PPC::BI__builtin_vsx_stxvp) {
15065 if (BuiltinID == PPC::BI__builtin_vsx_lxvp) {
15066 Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
15067 Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
15068 } else {
15069 Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
15070 Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
15071 }
15072 Ops.pop_back();
15073 llvm::Function *F = CGM.getIntrinsic(ID);
15074 return Builder.CreateCall(F, Ops, "");
15075 }
15076 SmallVector<Value*, 4> CallOps;
15077 if (Accumulate) {
15078 Address Addr = EmitPointerWithAlignment(E->getArg(0));
15079 Value *Acc = Builder.CreateLoad(Addr);
15080 CallOps.push_back(Acc);
15081 }
    for (unsigned i = 1; i < Ops.size(); i++)
15083 CallOps.push_back(Ops[i]);
15084 llvm::Function *F = CGM.getIntrinsic(ID);
15085 Value *Call = Builder.CreateCall(F, CallOps);
15086 return Builder.CreateAlignedStore(Call, Ops[0], MaybeAlign(64));
15087 }
15088 }
15089}
15090
15091namespace {
// If \p E is not a null pointer, insert an address space cast to match the
// return type of \p E if necessary.
15094Value *EmitAMDGPUDispatchPtr(CodeGenFunction &CGF,
15095 const CallExpr *E = nullptr) {
15096 auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_dispatch_ptr);
15097 auto *Call = CGF.Builder.CreateCall(F);
15098 Call->addAttribute(
15099 AttributeList::ReturnIndex,
15100 Attribute::getWithDereferenceableBytes(Call->getContext(), 64));
15101 Call->addAttribute(AttributeList::ReturnIndex,
15102 Attribute::getWithAlignment(Call->getContext(), Align(4)));
15103 if (!E)
15104 return Call;
15105 QualType BuiltinRetType = E->getType();
15106 auto *RetTy = cast<llvm::PointerType>(CGF.ConvertType(BuiltinRetType));
15107 if (RetTy == Call->getType())
15108 return Call;
15109 return CGF.Builder.CreateAddrSpaceCast(Call, RetTy);
15110}
15111
// \p Index is 0, 1, or 2 for the x, y, or z dimension, respectively.
15113Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {
15114 const unsigned XOffset = 4;
15115 auto *DP = EmitAMDGPUDispatchPtr(CGF);
15116 // Indexing the HSA kernel_dispatch_packet struct.
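  // The workgroup_size_{x,y,z} members are 16-bit fields at byte offsets 4,
  // 6, and 8 of the packet, hence XOffset + Index * 2.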
15117 auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 2);
15118 auto *GEP = CGF.Builder.CreateGEP(DP, Offset);
15119 auto *DstTy =
15120 CGF.Int16Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
15121 auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
15122 auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(2)));
15123 llvm::MDBuilder MDHelper(CGF.getLLVMContext());
15124 llvm::MDNode *RNode = MDHelper.createRange(APInt(16, 1),
15125 APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1));
15126 LD->setMetadata(llvm::LLVMContext::MD_range, RNode);
15127 LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
15128 llvm::MDNode::get(CGF.getLLVMContext(), None));
15129 return LD;
15130}
15131
// \p Index is 0, 1, or 2 for the x, y, or z dimension, respectively.
15133Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) {
15134 const unsigned XOffset = 12;
15135 auto *DP = EmitAMDGPUDispatchPtr(CGF);
15136 // Indexing the HSA kernel_dispatch_packet struct.
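  // The grid_size_{x,y,z} members are 32-bit fields at byte offsets 12, 16,
  // and 20 of the packet, hence XOffset + Index * 4.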
15137 auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 4);
15138 auto *GEP = CGF.Builder.CreateGEP(DP, Offset);
15139 auto *DstTy =
15140 CGF.Int32Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
15141 auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
15142 auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(4)));
15143 LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
15144 llvm::MDNode::get(CGF.getLLVMContext(), None));
15145 return LD;
15146}
15147} // namespace
15148
15149// For processing memory ordering and memory scope arguments of various
15150// amdgcn builtins.
// \p Order takes a C++11-compatible memory-ordering specifier and converts
// it into LLVM's memory ordering specifier using the atomic C ABI, and
// writes it to \p AO. \p Scope takes a const char * and converts it into an
// AMDGCN-specific SyncScopeID and writes it to \p SSID.
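// For example, __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup") maps to
// a seq_cst AtomicOrdering with the "workgroup" sync scope ID.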
15155bool CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
15156 llvm::AtomicOrdering &AO,
15157 llvm::SyncScope::ID &SSID) {
15158 if (isa<llvm::ConstantInt>(Order)) {
15159 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
15160
15161 // Map C11/C++11 memory ordering to LLVM memory ordering
15162 switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
15163 case llvm::AtomicOrderingCABI::acquire:
15164 AO = llvm::AtomicOrdering::Acquire;
15165 break;
15166 case llvm::AtomicOrderingCABI::release:
15167 AO = llvm::AtomicOrdering::Release;
15168 break;
15169 case llvm::AtomicOrderingCABI::acq_rel:
15170 AO = llvm::AtomicOrdering::AcquireRelease;
15171 break;
15172 case llvm::AtomicOrderingCABI::seq_cst:
15173 AO = llvm::AtomicOrdering::SequentiallyConsistent;
15174 break;
15175 case llvm::AtomicOrderingCABI::consume:
15176 case llvm::AtomicOrderingCABI::relaxed:
15177 break;
15178 }
15179
15180 StringRef scp;
15181 llvm::getConstantStringInfo(Scope, scp);
15182 SSID = getLLVMContext().getOrInsertSyncScopeID(scp);
15183 return true;
15184 }
15185 return false;
15186}
15187
15188Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
15189 const CallExpr *E) {
15190 llvm::AtomicOrdering AO = llvm::AtomicOrdering::SequentiallyConsistent;
15191 llvm::SyncScope::ID SSID;
15192 switch (BuiltinID) {
15193 case AMDGPU::BI__builtin_amdgcn_div_scale:
15194 case AMDGPU::BI__builtin_amdgcn_div_scalef: {
    // Translate from the intrinsic's struct return to the builtin's out
15196 // argument.
15197
15198 Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3));
15199
15200 llvm::Value *X = EmitScalarExpr(E->getArg(0));
15201 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
15202 llvm::Value *Z = EmitScalarExpr(E->getArg(2));
15203
15204 llvm::Function *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale,
15205 X->getType());
15206
15207 llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z});
15208
15209 llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
15210 llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);
15211
15212 llvm::Type *RealFlagType
15213 = FlagOutPtr.getPointer()->getType()->getPointerElementType();
15214
15215 llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
15216 Builder.CreateStore(FlagExt, FlagOutPtr);
15217 return Result;
15218 }
15219 case AMDGPU::BI__builtin_amdgcn_div_fmas:
15220 case AMDGPU::BI__builtin_amdgcn_div_fmasf: {
15221 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
15222 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
15223 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
15224 llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
15225
15226 llvm::Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas,
15227 Src0->getType());
15228 llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
15229 return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
15230 }
15231
15232 case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
15233 return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle);
15234 case AMDGPU::BI__builtin_amdgcn_mov_dpp8:
15235 return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_mov_dpp8);
15236 case AMDGPU::BI__builtin_amdgcn_mov_dpp:
15237 case AMDGPU::BI__builtin_amdgcn_update_dpp: {
15238 llvm::SmallVector<llvm::Value *, 6> Args;
15239 for (unsigned I = 0; I != E->getNumArgs(); ++I)
15240 Args.push_back(EmitScalarExpr(E->getArg(I)));
15241 assert(Args.size() == 5 || Args.size() == 6);
15242 if (Args.size() == 5)
15243 Args.insert(Args.begin(), llvm::UndefValue::get(Args[0]->getType()));
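    // mov_dpp lacks the 'old' source operand of update_dpp, so pass undef for
    // it and let both builtins share the update_dpp intrinsic.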
15244 Function *F =
15245 CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType());
15246 return Builder.CreateCall(F, Args);
15247 }
15248 case AMDGPU::BI__builtin_amdgcn_div_fixup:
15249 case AMDGPU::BI__builtin_amdgcn_div_fixupf:
15250 case AMDGPU::BI__builtin_amdgcn_div_fixuph:
15251 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup);
15252 case AMDGPU::BI__builtin_amdgcn_trig_preop:
15253 case AMDGPU::BI__builtin_amdgcn_trig_preopf:
15254 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop);
15255 case AMDGPU::BI__builtin_amdgcn_rcp:
15256 case AMDGPU::BI__builtin_amdgcn_rcpf:
15257 case AMDGPU::BI__builtin_amdgcn_rcph:
15258 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp);
15259 case AMDGPU::BI__builtin_amdgcn_sqrt:
15260 case AMDGPU::BI__builtin_amdgcn_sqrtf:
15261 case AMDGPU::BI__builtin_amdgcn_sqrth:
15262 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sqrt);
15263 case AMDGPU::BI__builtin_amdgcn_rsq:
15264 case AMDGPU::BI__builtin_amdgcn_rsqf:
15265 case AMDGPU::BI__builtin_amdgcn_rsqh:
15266 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq);
15267 case AMDGPU::BI__builtin_amdgcn_rsq_clamp:
15268 case AMDGPU::BI__builtin_amdgcn_rsq_clampf:
15269 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp);
15270 case AMDGPU::BI__builtin_amdgcn_sinf:
15271 case AMDGPU::BI__builtin_amdgcn_sinh:
15272 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin);
15273 case AMDGPU::BI__builtin_amdgcn_cosf:
15274 case AMDGPU::BI__builtin_amdgcn_cosh:
15275 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos);
15276 case AMDGPU::BI__builtin_amdgcn_dispatch_ptr:
15277 return EmitAMDGPUDispatchPtr(*this, E);
15278 case AMDGPU::BI__builtin_amdgcn_log_clampf:
15279 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp);
15280 case AMDGPU::BI__builtin_amdgcn_ldexp:
15281 case AMDGPU::BI__builtin_amdgcn_ldexpf:
15282 case AMDGPU::BI__builtin_amdgcn_ldexph:
15283 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp);
15284 case AMDGPU::BI__builtin_amdgcn_frexp_mant:
15285 case AMDGPU::BI__builtin_amdgcn_frexp_mantf:
15286 case AMDGPU::BI__builtin_amdgcn_frexp_manth:
15287 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant);
15288 case AMDGPU::BI__builtin_amdgcn_frexp_exp:
15289 case AMDGPU::BI__builtin_amdgcn_frexp_expf: {
15290 Value *Src0 = EmitScalarExpr(E->getArg(0));
15291 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
15292 { Builder.getInt32Ty(), Src0->getType() });
15293 return Builder.CreateCall(F, Src0);
15294 }
15295 case AMDGPU::BI__builtin_amdgcn_frexp_exph: {
15296 Value *Src0 = EmitScalarExpr(E->getArg(0));
15297 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
15298 { Builder.getInt16Ty(), Src0->getType() });
15299 return Builder.CreateCall(F, Src0);
15300 }
15301 case AMDGPU::BI__builtin_amdgcn_fract:
15302 case AMDGPU::BI__builtin_amdgcn_fractf:
15303 case AMDGPU::BI__builtin_amdgcn_fracth:
15304 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract);
15305 case AMDGPU::BI__builtin_amdgcn_lerp:
15306 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp);
15307 case AMDGPU::BI__builtin_amdgcn_ubfe:
15308 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_ubfe);
15309 case AMDGPU::BI__builtin_amdgcn_sbfe:
15310 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_sbfe);
15311 case AMDGPU::BI__builtin_amdgcn_uicmp:
15312 case AMDGPU::BI__builtin_amdgcn_uicmpl:
15313 case AMDGPU::BI__builtin_amdgcn_sicmp:
15314 case AMDGPU::BI__builtin_amdgcn_sicmpl: {
15315 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
15316 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
15317 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
15318
15319 // FIXME-GFX10: How should 32 bit mask be handled?
15320 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp,
15321 { Builder.getInt64Ty(), Src0->getType() });
15322 return Builder.CreateCall(F, { Src0, Src1, Src2 });
15323 }
15324 case AMDGPU::BI__builtin_amdgcn_fcmp:
15325 case AMDGPU::BI__builtin_amdgcn_fcmpf: {
15326 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
15327 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
15328 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
15329
15330 // FIXME-GFX10: How should 32 bit mask be handled?
15331 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp,
15332 { Builder.getInt64Ty(), Src0->getType() });
15333 return Builder.CreateCall(F, { Src0, Src1, Src2 });
15334 }
15335 case AMDGPU::BI__builtin_amdgcn_class:
15336 case AMDGPU::BI__builtin_amdgcn_classf:
15337 case AMDGPU::BI__builtin_amdgcn_classh:
15338 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class);
15339 case AMDGPU::BI__builtin_amdgcn_fmed3f:
15340 case AMDGPU::BI__builtin_amdgcn_fmed3h:
15341 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3);
15342 case AMDGPU::BI__builtin_amdgcn_ds_append:
15343 case AMDGPU::BI__builtin_amdgcn_ds_consume: {
15344 Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ?
15345 Intrinsic::amdgcn_ds_append : Intrinsic::amdgcn_ds_consume;
15346 Value *Src0 = EmitScalarExpr(E->getArg(0));
15347 Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() });
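    // The trailing i1 operand is the intrinsic's volatile flag; these
    // builtins always emit non-volatile operations.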
15348 return Builder.CreateCall(F, { Src0, Builder.getFalse() });
15349 }
15350 case AMDGPU::BI__builtin_amdgcn_ds_faddf:
15351 case AMDGPU::BI__builtin_amdgcn_ds_fminf:
15352 case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: {
15353 Intrinsic::ID Intrin;
15354 switch (BuiltinID) {
15355 case AMDGPU::BI__builtin_amdgcn_ds_faddf:
15356 Intrin = Intrinsic::amdgcn_ds_fadd;
15357 break;
15358 case AMDGPU::BI__builtin_amdgcn_ds_fminf:
15359 Intrin = Intrinsic::amdgcn_ds_fmin;
15360 break;
15361 case AMDGPU::BI__builtin_amdgcn_ds_fmaxf:
15362 Intrin = Intrinsic::amdgcn_ds_fmax;
15363 break;
15364 }
15365 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
15366 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
15367 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
15368 llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
15369 llvm::Value *Src4 = EmitScalarExpr(E->getArg(4));
15370 llvm::Function *F = CGM.getIntrinsic(Intrin, { Src1->getType() });
15371 llvm::FunctionType *FTy = F->getFunctionType();
15372 llvm::Type *PTy = FTy->getParamType(0);
15373 Src0 = Builder.CreatePointerBitCastOrAddrSpaceCast(Src0, PTy);
15374 return Builder.CreateCall(F, { Src0, Src1, Src2, Src3, Src4 });
15375 }
15376 case AMDGPU::BI__builtin_amdgcn_read_exec: {
15377 CallInst *CI = cast<CallInst>(
15378 EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, NormalRead, "exec"));
15379 CI->setConvergent();
15380 return CI;
15381 }
15382 case AMDGPU::BI__builtin_amdgcn_read_exec_lo:
15383 case AMDGPU::BI__builtin_amdgcn_read_exec_hi: {
15384 StringRef RegName = BuiltinID == AMDGPU::BI__builtin_amdgcn_read_exec_lo ?
15385 "exec_lo" : "exec_hi";
15386 CallInst *CI = cast<CallInst>(
15387 EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, NormalRead, RegName));
15388 CI->setConvergent();
15389 return CI;
15390 }
15391 // amdgcn workitem
15392 case AMDGPU::BI__builtin_amdgcn_workitem_id_x:
15393 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024);
15394 case AMDGPU::BI__builtin_amdgcn_workitem_id_y:
15395 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_y, 0, 1024);
15396 case AMDGPU::BI__builtin_amdgcn_workitem_id_z:
15397 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024);
15398
15399 // amdgcn workgroup size
15400 case AMDGPU::BI__builtin_amdgcn_workgroup_size_x:
15401 return EmitAMDGPUWorkGroupSize(*this, 0);
15402 case AMDGPU::BI__builtin_amdgcn_workgroup_size_y:
15403 return EmitAMDGPUWorkGroupSize(*this, 1);
15404 case AMDGPU::BI__builtin_amdgcn_workgroup_size_z:
15405 return EmitAMDGPUWorkGroupSize(*this, 2);
15406
15407 // amdgcn grid size
15408 case AMDGPU::BI__builtin_amdgcn_grid_size_x:
15409 return EmitAMDGPUGridSize(*this, 0);
15410 case AMDGPU::BI__builtin_amdgcn_grid_size_y:
15411 return EmitAMDGPUGridSize(*this, 1);
15412 case AMDGPU::BI__builtin_amdgcn_grid_size_z:
15413 return EmitAMDGPUGridSize(*this, 2);
15414
15415 // r600 intrinsics
15416 case AMDGPU::BI__builtin_r600_recipsqrt_ieee:
15417 case AMDGPU::BI__builtin_r600_recipsqrt_ieeef:
15418 return emitUnaryBuiltin(*this, E, Intrinsic::r600_recipsqrt_ieee);
15419 case AMDGPU::BI__builtin_r600_read_tidig_x:
15420 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_x, 0, 1024);
15421 case AMDGPU::BI__builtin_r600_read_tidig_y:
15422 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024);
15423 case AMDGPU::BI__builtin_r600_read_tidig_z:
15424 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024);
15425 case AMDGPU::BI__builtin_amdgcn_alignbit: {
15426 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
15427 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
15428 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
15429 Function *F = CGM.getIntrinsic(Intrinsic::fshr, Src0->getType());
15430 return Builder.CreateCall(F, { Src0, Src1, Src2 });
15431 }
15432
15433 case AMDGPU::BI__builtin_amdgcn_fence: {
15434 if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)),
15435 EmitScalarExpr(E->getArg(1)), AO, SSID))
15436 return Builder.CreateFence(AO, SSID);
15437 LLVM_FALLTHROUGH;
15438 }
15439 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
15440 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
15441 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
15442 case AMDGPU::BI__builtin_amdgcn_atomic_dec64: {
15443 unsigned BuiltinAtomicOp;
15444 llvm::Type *ResultType = ConvertType(E->getType());
15445
15446 switch (BuiltinID) {
15447 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
15448 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
15449 BuiltinAtomicOp = Intrinsic::amdgcn_atomic_inc;
15450 break;
15451 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
15452 case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
15453 BuiltinAtomicOp = Intrinsic::amdgcn_atomic_dec;
15454 break;
15455 }
15456
15457 Value *Ptr = EmitScalarExpr(E->getArg(0));
15458 Value *Val = EmitScalarExpr(E->getArg(1));
15459
15460 llvm::Function *F =
15461 CGM.getIntrinsic(BuiltinAtomicOp, {ResultType, Ptr->getType()});
15462
15463 if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)),
15464 EmitScalarExpr(E->getArg(3)), AO, SSID)) {
15465
      // llvm.amdgcn.atomic.inc and llvm.amdgcn.atomic.dec expect ordering and
      // scope as unsigned values.
15468 Value *MemOrder = Builder.getInt32(static_cast<int>(AO));
15469 Value *MemScope = Builder.getInt32(static_cast<int>(SSID));
15470
15471 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
15472 bool Volatile =
15473 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
15474 Value *IsVolatile = Builder.getInt1(static_cast<bool>(Volatile));
15475
15476 return Builder.CreateCall(F, {Ptr, Val, MemOrder, MemScope, IsVolatile});
15477 }
15478 LLVM_FALLTHROUGH;
15479 }
15480 default:
15481 return nullptr;
15482 }
15483}
15484
15485/// Handle a SystemZ function in which the final argument is a pointer
15486/// to an int that receives the post-instruction CC value. At the LLVM level
15487/// this is represented as a function that returns a {result, cc} pair.
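/// For example, "result = __builtin_s390_vceqbs(a, b, &cc)" is emitted as a
/// call to the llvm.s390.vceqbs intrinsic, with the second member of the
/// returned pair stored through the cc pointer.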
15488static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
15489 unsigned IntrinsicID,
15490 const CallExpr *E) {
15491 unsigned NumArgs = E->getNumArgs() - 1;
15492 SmallVector<Value *, 8> Args(NumArgs);
15493 for (unsigned I = 0; I < NumArgs; ++I)
15494 Args[I] = CGF.EmitScalarExpr(E->getArg(I));
15495 Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs));
15496 Function *F = CGF.CGM.getIntrinsic(IntrinsicID);
15497 Value *Call = CGF.Builder.CreateCall(F, Args);
15498 Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
15499 CGF.Builder.CreateStore(CC, CCPtr);
15500 return CGF.Builder.CreateExtractValue(Call, 0);
15501}
15502
15503Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
15504 const CallExpr *E) {
15505 switch (BuiltinID) {
15506 case SystemZ::BI__builtin_tbegin: {
15507 Value *TDB = EmitScalarExpr(E->getArg(0));
15508 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
15509 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin);
15510 return Builder.CreateCall(F, {TDB, Control});
15511 }
15512 case SystemZ::BI__builtin_tbegin_nofloat: {
15513 Value *TDB = EmitScalarExpr(E->getArg(0));
15514 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
15515 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat);
15516 return Builder.CreateCall(F, {TDB, Control});
15517 }
15518 case SystemZ::BI__builtin_tbeginc: {
15519 Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy);
15520 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08);
15521 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc);
15522 return Builder.CreateCall(F, {TDB, Control});
15523 }
15524 case SystemZ::BI__builtin_tabort: {
15525 Value *Data = EmitScalarExpr(E->getArg(0));
15526 Function *F = CGM.getIntrinsic(Intrinsic::s390_tabort);
15527 return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort"));
15528 }
15529 case SystemZ::BI__builtin_non_tx_store: {
15530 Value *Address = EmitScalarExpr(E->getArg(0));
15531 Value *Data = EmitScalarExpr(E->getArg(1));
15532 Function *F = CGM.getIntrinsic(Intrinsic::s390_ntstg);
15533 return Builder.CreateCall(F, {Data, Address});
15534 }
15535
15536 // Vector builtins. Note that most vector builtins are mapped automatically
15537 // to target-specific LLVM intrinsics. The ones handled specially here can
15538 // be represented via standard LLVM IR, which is preferable to enable common
15539 // LLVM optimizations.
15540
15541 case SystemZ::BI__builtin_s390_vpopctb:
15542 case SystemZ::BI__builtin_s390_vpopcth:
15543 case SystemZ::BI__builtin_s390_vpopctf:
15544 case SystemZ::BI__builtin_s390_vpopctg: {
15545 llvm::Type *ResultType = ConvertType(E->getType());
15546 Value *X = EmitScalarExpr(E->getArg(0));
15547 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
15548 return Builder.CreateCall(F, X);
15549 }
15550
15551 case SystemZ::BI__builtin_s390_vclzb:
15552 case SystemZ::BI__builtin_s390_vclzh:
15553 case SystemZ::BI__builtin_s390_vclzf:
15554 case SystemZ::BI__builtin_s390_vclzg: {
15555 llvm::Type *ResultType = ConvertType(E->getType());
15556 Value *X = EmitScalarExpr(E->getArg(0));
15557 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
15558 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
15559 return Builder.CreateCall(F, {X, Undef});
15560 }
15561
15562 case SystemZ::BI__builtin_s390_vctzb:
15563 case SystemZ::BI__builtin_s390_vctzh:
15564 case SystemZ::BI__builtin_s390_vctzf:
15565 case SystemZ::BI__builtin_s390_vctzg: {
15566 llvm::Type *ResultType = ConvertType(E->getType());
15567 Value *X = EmitScalarExpr(E->getArg(0));
15568 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
15569 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
15570 return Builder.CreateCall(F, {X, Undef});
15571 }
15572
15573 case SystemZ::BI__builtin_s390_vfsqsb:
15574 case SystemZ::BI__builtin_s390_vfsqdb: {
15575 llvm::Type *ResultType = ConvertType(E->getType());
15576 Value *X = EmitScalarExpr(E->getArg(0));
15577 if (Builder.getIsFPConstrained()) {
15578 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, ResultType);
15579 return Builder.CreateConstrainedFPCall(F, { X });
15580 } else {
15581 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
15582 return Builder.CreateCall(F, X);
15583 }
15584 }
15585 case SystemZ::BI__builtin_s390_vfmasb:
15586 case SystemZ::BI__builtin_s390_vfmadb: {
15587 llvm::Type *ResultType = ConvertType(E->getType());
15588 Value *X = EmitScalarExpr(E->getArg(0));
15589 Value *Y = EmitScalarExpr(E->getArg(1));
15590 Value *Z = EmitScalarExpr(E->getArg(2));
15591 if (Builder.getIsFPConstrained()) {
15592 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
15593 return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
15594 } else {
15595 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
15596 return Builder.CreateCall(F, {X, Y, Z});
15597 }
15598 }
15599 case SystemZ::BI__builtin_s390_vfmssb:
15600 case SystemZ::BI__builtin_s390_vfmsdb: {
15601 llvm::Type *ResultType = ConvertType(E->getType());
15602 Value *X = EmitScalarExpr(E->getArg(0));
15603 Value *Y = EmitScalarExpr(E->getArg(1));
15604 Value *Z = EmitScalarExpr(E->getArg(2));
15605 if (Builder.getIsFPConstrained()) {
15606 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
15607 return Builder.CreateConstrainedFPCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
15608 } else {
15609 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
15610 return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
15611 }
15612 }
15613 case SystemZ::BI__builtin_s390_vfnmasb:
15614 case SystemZ::BI__builtin_s390_vfnmadb: {
15615 llvm::Type *ResultType = ConvertType(E->getType());
15616 Value *X = EmitScalarExpr(E->getArg(0));
15617 Value *Y = EmitScalarExpr(E->getArg(1));
15618 Value *Z = EmitScalarExpr(E->getArg(2));
15619 if (Builder.getIsFPConstrained()) {
15620 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
15621 return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
15622 } else {
15623 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
15624 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
15625 }
15626 }
15627 case SystemZ::BI__builtin_s390_vfnmssb:
15628 case SystemZ::BI__builtin_s390_vfnmsdb: {
15629 llvm::Type *ResultType = ConvertType(E->getType());
15630 Value *X = EmitScalarExpr(E->getArg(0));
15631 Value *Y = EmitScalarExpr(E->getArg(1));
15632 Value *Z = EmitScalarExpr(E->getArg(2));
15633 if (Builder.getIsFPConstrained()) {
15634 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
      Value *NegZ = Builder.CreateFNeg(Z, "neg");
      return Builder.CreateFNeg(
          Builder.CreateConstrainedFPCall(F, {X, Y, NegZ}), "neg");
15637 } else {
15638 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
15639 Value *NegZ = Builder.CreateFNeg(Z, "neg");
      return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, NegZ}), "neg");
15641 }
15642 }
15643 case SystemZ::BI__builtin_s390_vflpsb:
15644 case SystemZ::BI__builtin_s390_vflpdb: {
15645 llvm::Type *ResultType = ConvertType(E->getType());
15646 Value *X = EmitScalarExpr(E->getArg(0));
15647 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
15648 return Builder.CreateCall(F, X);
15649 }
15650 case SystemZ::BI__builtin_s390_vflnsb:
15651 case SystemZ::BI__builtin_s390_vflndb: {
15652 llvm::Type *ResultType = ConvertType(E->getType());
15653 Value *X = EmitScalarExpr(E->getArg(0));
15654 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
15655 return Builder.CreateFNeg(Builder.CreateCall(F, X), "neg");
15656 }
15657 case SystemZ::BI__builtin_s390_vfisb:
15658 case SystemZ::BI__builtin_s390_vfidb: {
15659 llvm::Type *ResultType = ConvertType(E->getType());
15660 Value *X = EmitScalarExpr(E->getArg(0));
15661 // Constant-fold the M4 and M5 mask arguments.
15662 llvm::APSInt M4 = *E->getArg(1)->getIntegerConstantExpr(getContext());
15663 llvm::APSInt M5 = *E->getArg(2)->getIntegerConstantExpr(getContext());
    // Check whether this instance can be represented via an LLVM standard
15665 // intrinsic. We only support some combinations of M4 and M5.
15666 Intrinsic::ID ID = Intrinsic::not_intrinsic;
15667 Intrinsic::ID CI;
15668 switch (M4.getZExtValue()) {
15669 default: break;
15670 case 0: // IEEE-inexact exception allowed
15671 switch (M5.getZExtValue()) {
15672 default: break;
15673 case 0: ID = Intrinsic::rint;
15674 CI = Intrinsic::experimental_constrained_rint; break;
15675 }
15676 break;
15677 case 4: // IEEE-inexact exception suppressed
15678 switch (M5.getZExtValue()) {
15679 default: break;
15680 case 0: ID = Intrinsic::nearbyint;
15681 CI = Intrinsic::experimental_constrained_nearbyint; break;
15682 case 1: ID = Intrinsic::round;
15683 CI = Intrinsic::experimental_constrained_round; break;
15684 case 5: ID = Intrinsic::trunc;
15685 CI = Intrinsic::experimental_constrained_trunc; break;
15686 case 6: ID = Intrinsic::ceil;
15687 CI = Intrinsic::experimental_constrained_ceil; break;
15688 case 7: ID = Intrinsic::floor;
15689 CI = Intrinsic::experimental_constrained_floor; break;
15690 }
15691 break;
15692 }
15693 if (ID != Intrinsic::not_intrinsic) {
15694 if (Builder.getIsFPConstrained()) {
15695 Function *F = CGM.getIntrinsic(CI, ResultType);
15696 return Builder.CreateConstrainedFPCall(F, X);
15697 } else {
15698 Function *F = CGM.getIntrinsic(ID, ResultType);
15699 return Builder.CreateCall(F, X);
15700 }
15701 }
15702 switch (BuiltinID) { // FIXME: constrained version?
15703 case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break;
15704 case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break;
15705 default: llvm_unreachable("Unknown BuiltinID");
15706 }
15707 Function *F = CGM.getIntrinsic(ID);
15708 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
15709 Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5);
15710 return Builder.CreateCall(F, {X, M4Value, M5Value});
15711 }
15712 case SystemZ::BI__builtin_s390_vfmaxsb:
15713 case SystemZ::BI__builtin_s390_vfmaxdb: {
15714 llvm::Type *ResultType = ConvertType(E->getType());
15715 Value *X = EmitScalarExpr(E->getArg(0));
15716 Value *Y = EmitScalarExpr(E->getArg(1));
15717 // Constant-fold the M4 mask argument.
15718 llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext());
    // Check whether this instance can be represented via an LLVM standard
15720 // intrinsic. We only support some values of M4.
15721 Intrinsic::ID ID = Intrinsic::not_intrinsic;
15722 Intrinsic::ID CI;
15723 switch (M4.getZExtValue()) {
15724 default: break;
15725 case 4: ID = Intrinsic::maxnum;
15726 CI = Intrinsic::experimental_constrained_maxnum; break;
15727 }
15728 if (ID != Intrinsic::not_intrinsic) {
15729 if (Builder.getIsFPConstrained()) {
15730 Function *F = CGM.getIntrinsic(CI, ResultType);
15731 return Builder.CreateConstrainedFPCall(F, {X, Y});
15732 } else {
15733 Function *F = CGM.getIntrinsic(ID, ResultType);
15734 return Builder.CreateCall(F, {X, Y});
15735 }
15736 }
15737 switch (BuiltinID) {
15738 case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break;
15739 case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break;
15740 default: llvm_unreachable("Unknown BuiltinID");
15741 }
15742 Function *F = CGM.getIntrinsic(ID);
15743 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
15744 return Builder.CreateCall(F, {X, Y, M4Value});
15745 }
15746 case SystemZ::BI__builtin_s390_vfminsb:
15747 case SystemZ::BI__builtin_s390_vfmindb: {
15748 llvm::Type *ResultType = ConvertType(E->getType());
15749 Value *X = EmitScalarExpr(E->getArg(0));
15750 Value *Y = EmitScalarExpr(E->getArg(1));
15751 // Constant-fold the M4 mask argument.
15752 llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext());
    // Check whether this instance can be represented via an LLVM standard
15754 // intrinsic. We only support some values of M4.
15755 Intrinsic::ID ID = Intrinsic::not_intrinsic;
15756 Intrinsic::ID CI;
15757 switch (M4.getZExtValue()) {
15758 default: break;
15759 case 4: ID = Intrinsic::minnum;
15760 CI = Intrinsic::experimental_constrained_minnum; break;
15761 }
15762 if (ID != Intrinsic::not_intrinsic) {
15763 if (Builder.getIsFPConstrained()) {
15764 Function *F = CGM.getIntrinsic(CI, ResultType);
15765 return Builder.CreateConstrainedFPCall(F, {X, Y});
15766 } else {
15767 Function *F = CGM.getIntrinsic(ID, ResultType);
15768 return Builder.CreateCall(F, {X, Y});
15769 }
15770 }
15771 switch (BuiltinID) {
15772 case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break;
15773 case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break;
15774 default: llvm_unreachable("Unknown BuiltinID");
15775 }
15776 Function *F = CGM.getIntrinsic(ID);
15777 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
15778 return Builder.CreateCall(F, {X, Y, M4Value});
15779 }
15780
15781 case SystemZ::BI__builtin_s390_vlbrh:
15782 case SystemZ::BI__builtin_s390_vlbrf:
15783 case SystemZ::BI__builtin_s390_vlbrg: {
15784 llvm::Type *ResultType = ConvertType(E->getType());
15785 Value *X = EmitScalarExpr(E->getArg(0));
15786 Function *F = CGM.getIntrinsic(Intrinsic::bswap, ResultType);
15787 return Builder.CreateCall(F, X);
15788 }
15789
15790 // Vector intrinsics that output the post-instruction CC value.
15791
15792#define INTRINSIC_WITH_CC(NAME) \
15793 case SystemZ::BI__builtin_##NAME: \
15794 return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E)
15795
15796 INTRINSIC_WITH_CC(s390_vpkshs);
15797 INTRINSIC_WITH_CC(s390_vpksfs);
15798 INTRINSIC_WITH_CC(s390_vpksgs);
15799
15800 INTRINSIC_WITH_CC(s390_vpklshs);
15801 INTRINSIC_WITH_CC(s390_vpklsfs);
15802 INTRINSIC_WITH_CC(s390_vpklsgs);
15803
15804 INTRINSIC_WITH_CC(s390_vceqbs);
15805 INTRINSIC_WITH_CC(s390_vceqhs);
15806 INTRINSIC_WITH_CC(s390_vceqfs);
15807 INTRINSIC_WITH_CC(s390_vceqgs);
15808
15809 INTRINSIC_WITH_CC(s390_vchbs);
15810 INTRINSIC_WITH_CC(s390_vchhs);
15811 INTRINSIC_WITH_CC(s390_vchfs);
15812 INTRINSIC_WITH_CC(s390_vchgs);
15813
15814 INTRINSIC_WITH_CC(s390_vchlbs);
15815 INTRINSIC_WITH_CC(s390_vchlhs);
15816 INTRINSIC_WITH_CC(s390_vchlfs);
15817 INTRINSIC_WITH_CC(s390_vchlgs);
15818
15819 INTRINSIC_WITH_CC(s390_vfaebs);
15820 INTRINSIC_WITH_CC(s390_vfaehs);
15821 INTRINSIC_WITH_CC(s390_vfaefs);
15822
15823 INTRINSIC_WITH_CC(s390_vfaezbs);
15824 INTRINSIC_WITH_CC(s390_vfaezhs);
15825 INTRINSIC_WITH_CC(s390_vfaezfs);
15826
15827 INTRINSIC_WITH_CC(s390_vfeebs);
15828 INTRINSIC_WITH_CC(s390_vfeehs);
15829 INTRINSIC_WITH_CC(s390_vfeefs);
15830
15831 INTRINSIC_WITH_CC(s390_vfeezbs);
15832 INTRINSIC_WITH_CC(s390_vfeezhs);
15833 INTRINSIC_WITH_CC(s390_vfeezfs);
15834
15835 INTRINSIC_WITH_CC(s390_vfenebs);
15836 INTRINSIC_WITH_CC(s390_vfenehs);
15837 INTRINSIC_WITH_CC(s390_vfenefs);
15838
15839 INTRINSIC_WITH_CC(s390_vfenezbs);
15840 INTRINSIC_WITH_CC(s390_vfenezhs);
15841 INTRINSIC_WITH_CC(s390_vfenezfs);
15842
15843 INTRINSIC_WITH_CC(s390_vistrbs);
15844 INTRINSIC_WITH_CC(s390_vistrhs);
15845 INTRINSIC_WITH_CC(s390_vistrfs);
15846
15847 INTRINSIC_WITH_CC(s390_vstrcbs);
15848 INTRINSIC_WITH_CC(s390_vstrchs);
15849 INTRINSIC_WITH_CC(s390_vstrcfs);
15850
15851 INTRINSIC_WITH_CC(s390_vstrczbs);
15852 INTRINSIC_WITH_CC(s390_vstrczhs);
15853 INTRINSIC_WITH_CC(s390_vstrczfs);
15854
15855 INTRINSIC_WITH_CC(s390_vfcesbs);
15856 INTRINSIC_WITH_CC(s390_vfcedbs);
15857 INTRINSIC_WITH_CC(s390_vfchsbs);
15858 INTRINSIC_WITH_CC(s390_vfchdbs);
15859 INTRINSIC_WITH_CC(s390_vfchesbs);
15860 INTRINSIC_WITH_CC(s390_vfchedbs);
15861
15862 INTRINSIC_WITH_CC(s390_vftcisb);
15863 INTRINSIC_WITH_CC(s390_vftcidb);
15864
15865 INTRINSIC_WITH_CC(s390_vstrsb);
15866 INTRINSIC_WITH_CC(s390_vstrsh);
15867 INTRINSIC_WITH_CC(s390_vstrsf);
15868
15869 INTRINSIC_WITH_CC(s390_vstrszb);
15870 INTRINSIC_WITH_CC(s390_vstrszh);
15871 INTRINSIC_WITH_CC(s390_vstrszf);
15872
15873#undef INTRINSIC_WITH_CC
15874
15875 default:
15876 return nullptr;
15877 }
15878}
15879
15880namespace {
// Helper classes for mapping MMA builtins to particular LLVM intrinsic variants.
15882struct NVPTXMmaLdstInfo {
15883 unsigned NumResults; // Number of elements to load/store
  // Intrinsic IDs for row/col variants. 0 if a particular layout is unsupported.
15885 unsigned IID_col;
15886 unsigned IID_row;
15887};
15888
15889#define MMA_INTR(geom_op_type, layout) \
15890 Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride
15891#define MMA_LDST(n, geom_op_type) \
15892 { n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) }
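// e.g., MMA_LDST(8, m16n16k16_load_a_f16) expands to
// {8, Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride,
//  Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride}.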
15893
15894static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
15895 switch (BuiltinID) {
15896 // FP MMA loads
15897 case NVPTX::BI__hmma_m16n16k16_ld_a:
15898 return MMA_LDST(8, m16n16k16_load_a_f16);
15899 case NVPTX::BI__hmma_m16n16k16_ld_b:
15900 return MMA_LDST(8, m16n16k16_load_b_f16);
15901 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
15902 return MMA_LDST(4, m16n16k16_load_c_f16);
15903 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
15904 return MMA_LDST(8, m16n16k16_load_c_f32);
15905 case NVPTX::BI__hmma_m32n8k16_ld_a:
15906 return MMA_LDST(8, m32n8k16_load_a_f16);
15907 case NVPTX::BI__hmma_m32n8k16_ld_b:
15908 return MMA_LDST(8, m32n8k16_load_b_f16);
15909 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
15910 return MMA_LDST(4, m32n8k16_load_c_f16);
15911 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
15912 return MMA_LDST(8, m32n8k16_load_c_f32);
15913 case NVPTX::BI__hmma_m8n32k16_ld_a:
15914 return MMA_LDST(8, m8n32k16_load_a_f16);
15915 case NVPTX::BI__hmma_m8n32k16_ld_b:
15916 return MMA_LDST(8, m8n32k16_load_b_f16);
15917 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
15918 return MMA_LDST(4, m8n32k16_load_c_f16);
15919 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
15920 return MMA_LDST(8, m8n32k16_load_c_f32);
15921
15922 // Integer MMA loads
15923 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
15924 return MMA_LDST(2, m16n16k16_load_a_s8);
15925 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
15926 return MMA_LDST(2, m16n16k16_load_a_u8);
15927 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
15928 return MMA_LDST(2, m16n16k16_load_b_s8);
15929 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
15930 return MMA_LDST(2, m16n16k16_load_b_u8);
15931 case NVPTX::BI__imma_m16n16k16_ld_c:
15932 return MMA_LDST(8, m16n16k16_load_c_s32);
15933 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
15934 return MMA_LDST(4, m32n8k16_load_a_s8);
15935 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
15936 return MMA_LDST(4, m32n8k16_load_a_u8);
15937 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
15938 return MMA_LDST(1, m32n8k16_load_b_s8);
15939 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
15940 return MMA_LDST(1, m32n8k16_load_b_u8);
15941 case NVPTX::BI__imma_m32n8k16_ld_c:
15942 return MMA_LDST(8, m32n8k16_load_c_s32);
15943 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
15944 return MMA_LDST(1, m8n32k16_load_a_s8);
15945 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
15946 return MMA_LDST(1, m8n32k16_load_a_u8);
15947 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
15948 return MMA_LDST(4, m8n32k16_load_b_s8);
15949 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
15950 return MMA_LDST(4, m8n32k16_load_b_u8);
15951 case NVPTX::BI__imma_m8n32k16_ld_c:
15952 return MMA_LDST(8, m8n32k16_load_c_s32);
15953
15954 // Sub-integer MMA loads.
15955 // Only row/col layout is supported by A/B fragments.
15956 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
15957 return {1, 0, MMA_INTR(m8n8k32_load_a_s4, row)};
15958 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
15959 return {1, 0, MMA_INTR(m8n8k32_load_a_u4, row)};
15960 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
15961 return {1, MMA_INTR(m8n8k32_load_b_s4, col), 0};
15962 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
15963 return {1, MMA_INTR(m8n8k32_load_b_u4, col), 0};
15964 case NVPTX::BI__imma_m8n8k32_ld_c:
15965 return MMA_LDST(2, m8n8k32_load_c_s32);
15966 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
15967 return {1, 0, MMA_INTR(m8n8k128_load_a_b1, row)};
15968 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
15969 return {1, MMA_INTR(m8n8k128_load_b_b1, col), 0};
15970 case NVPTX::BI__bmma_m8n8k128_ld_c:
15971 return MMA_LDST(2, m8n8k128_load_c_s32);
15972
  // NOTE: We need to follow the inconsistent naming scheme used by NVCC.
  // Unlike PTX and LLVM IR, where stores always use fragment D, NVCC builtins
  // always use fragment C for both loads and stores.
  // FP MMA stores.
  case NVPTX::BI__hmma_m16n16k16_st_c_f16:
    return MMA_LDST(4, m16n16k16_store_d_f16);
  case NVPTX::BI__hmma_m16n16k16_st_c_f32:
    return MMA_LDST(8, m16n16k16_store_d_f32);
  case NVPTX::BI__hmma_m32n8k16_st_c_f16:
    return MMA_LDST(4, m32n8k16_store_d_f16);
  case NVPTX::BI__hmma_m32n8k16_st_c_f32:
    return MMA_LDST(8, m32n8k16_store_d_f32);
  case NVPTX::BI__hmma_m8n32k16_st_c_f16:
    return MMA_LDST(4, m8n32k16_store_d_f16);
  case NVPTX::BI__hmma_m8n32k16_st_c_f32:
    return MMA_LDST(8, m8n32k16_store_d_f32);

  // Integer and sub-integer MMA stores.
  // Another naming quirk. Unlike other MMA builtins that use PTX types in the
  // name, integer loads/stores use LLVM's i32.
  case NVPTX::BI__imma_m16n16k16_st_c_i32:
    return MMA_LDST(8, m16n16k16_store_d_s32);
  case NVPTX::BI__imma_m32n8k16_st_c_i32:
    return MMA_LDST(8, m32n8k16_store_d_s32);
  case NVPTX::BI__imma_m8n32k16_st_c_i32:
    return MMA_LDST(8, m8n32k16_store_d_s32);
  case NVPTX::BI__imma_m8n8k32_st_c_i32:
    return MMA_LDST(2, m8n8k32_store_d_s32);
  case NVPTX::BI__bmma_m8n8k128_st_c_i32:
    return MMA_LDST(2, m8n8k128_store_d_s32);

  default:
    llvm_unreachable("Unknown MMA builtin");
  }
}
#undef MMA_LDST
#undef MMA_INTR

struct NVPTXMmaInfo {
  unsigned NumEltsA;
  unsigned NumEltsB;
  unsigned NumEltsC;
  unsigned NumEltsD;
  std::array<unsigned, 8> Variants;

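  // Returns the intrinsic ID for Layout (0: row/row, 1: row/col, 2: col/row,
  // 3: col/col) and Satf, or 0 if that variant does not exist.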
  unsigned getMMAIntrinsic(int Layout, bool Satf) {
    unsigned Index = Layout * 2 + Satf;
    if (Index >= Variants.size())
      return 0;
    return Variants[Index];
  }
};

// Returns an NVPTXMmaInfo for the given builtin; its getMMAIntrinsic returns
// the intrinsic matching Layout and Satf for valid combinations of the two,
// and 0 otherwise.
static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) {
  // clang-format off
#define MMA_VARIANTS(geom, type) {{ \
      Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \
      Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \
      Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
      Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
      Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \
      Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \
      Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type, \
      Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite \
  }}
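// The eight variants above are indexed by Layout * 2 + Satf: row/row,
// row/col, col/row, col/col, each first without and then with .satfinite.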
// Sub-integer MMA only supports row.col layout.
#define MMA_VARIANTS_I4(geom, type) {{ \
      0, \
      0, \
      Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
      Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
      0, \
      0, \
      0, \
      0 \
  }}
// b1 MMA does not support .satfinite.
#define MMA_VARIANTS_B1(geom, type) {{ \
      0, \
      0, \
      Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
      0, \
      0, \
      0, \
      0, \
      0 \
  }}
  // clang-format on
  switch (BuiltinID) {
  // FP MMA
  // Note that the 'type' argument of MMA_VARIANTS uses D_C notation, while
  // the NumElts fields of the return value are ordered as A, B, C, D.
  case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
    return {8, 8, 4, 4, MMA_VARIANTS(m16n16k16, f16_f16)};
  case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
    return {8, 8, 4, 8, MMA_VARIANTS(m16n16k16, f32_f16)};
  case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
    return {8, 8, 8, 4, MMA_VARIANTS(m16n16k16, f16_f32)};
  case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
    return {8, 8, 8, 8, MMA_VARIANTS(m16n16k16, f32_f32)};
  case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
    return {8, 8, 4, 4, MMA_VARIANTS(m32n8k16, f16_f16)};
  case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
    return {8, 8, 4, 8, MMA_VARIANTS(m32n8k16, f32_f16)};
  case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
    return {8, 8, 8, 4, MMA_VARIANTS(m32n8k16, f16_f32)};
  case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
    return {8, 8, 8, 8, MMA_VARIANTS(m32n8k16, f32_f32)};
  case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
    return {8, 8, 4, 4, MMA_VARIANTS(m8n32k16, f16_f16)};
  case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
    return {8, 8, 4, 8, MMA_VARIANTS(m8n32k16, f32_f16)};
  case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
    return {8, 8, 8, 4, MMA_VARIANTS(m8n32k16, f16_f32)};
  case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
    return {8, 8, 8, 8, MMA_VARIANTS(m8n32k16, f32_f32)};

  // Integer MMA
  case NVPTX::BI__imma_m16n16k16_mma_s8:
    return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, s8)};
  case NVPTX::BI__imma_m16n16k16_mma_u8:
    return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, u8)};
  case NVPTX::BI__imma_m32n8k16_mma_s8:
    return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, s8)};
  case NVPTX::BI__imma_m32n8k16_mma_u8:
    return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, u8)};
  case NVPTX::BI__imma_m8n32k16_mma_s8:
    return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, s8)};
  case NVPTX::BI__imma_m8n32k16_mma_u8:
    return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, u8)};

  // Sub-integer MMA
  case NVPTX::BI__imma_m8n8k32_mma_s4:
    return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, s4)};
  case NVPTX::BI__imma_m8n8k32_mma_u4:
    return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, u4)};
  case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
    return {1, 1, 2, 2, MMA_VARIANTS_B1(m8n8k128, b1)};
  default:
    llvm_unreachable("Unexpected builtin ID.");
  }
#undef MMA_VARIANTS
#undef MMA_VARIANTS_I4
#undef MMA_VARIANTS_B1
}

} // namespace

Value *
CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
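  // Helper for the __nvvm_ldg_* builtins: emits a call to the matching
  // llvm.nvvm.ldg.global.{i,f} intrinsic, passing the pointer's natural
  // alignment as the second operand.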
  auto MakeLdg = [&](unsigned IntrinsicID) {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    clang::CharUnits Align =
        CGM.getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
    return Builder.CreateCall(
        CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
                                       Ptr->getType()}),
        {Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())});
  };
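  // Helper for the scoped (cta/sys) atomic builtins: emits a call to the
  // given intrinsic, overloaded on the pointee and pointer types.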
  auto MakeScopedAtomic = [&](unsigned IntrinsicID) {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(
        CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
                                       Ptr->getType()}),
        {Ptr, EmitScalarExpr(E->getArg(1))});
  };
  switch (BuiltinID) {
  case NVPTX::BI__nvvm_atom_add_gen_i:
  case NVPTX::BI__nvvm_atom_add_gen_l:
  case NVPTX::BI__nvvm_atom_add_gen_ll:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E);

  case NVPTX::BI__nvvm_atom_sub_gen_i:
  case NVPTX::BI__nvvm_atom_sub_gen_l:
  case NVPTX::BI__nvvm_atom_sub_gen_ll:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E);

  case NVPTX::BI__nvvm_atom_and_gen_i:
  case NVPTX::BI__nvvm_atom_and_gen_l:
  case NVPTX::BI__nvvm_atom_and_gen_ll:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E);

  case NVPTX::BI__nvvm_atom_or_gen_i:
  case NVPTX::BI__nvvm_atom_or_gen_l:
  case NVPTX::BI__nvvm_atom_or_gen_ll:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E);

  case NVPTX::BI__nvvm_atom_xor_gen_i:
  case NVPTX::BI__nvvm_atom_xor_gen_l:
  case NVPTX::BI__nvvm_atom_xor_gen_ll:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E);

  case NVPTX::BI__nvvm_atom_xchg_gen_i:
  case NVPTX::BI__nvvm_atom_xchg_gen_l:
  case NVPTX::BI__nvvm_atom_xchg_gen_ll:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E);

  case NVPTX::BI__nvvm_atom_max_gen_i:
  case NVPTX::BI__nvvm_atom_max_gen_l:
  case NVPTX::BI__nvvm_atom_max_gen_ll:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);

  case NVPTX::BI__nvvm_atom_max_gen_ui:
  case NVPTX::BI__nvvm_atom_max_gen_ul:
  case NVPTX::BI__nvvm_atom_max_gen_ull:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E);

  case NVPTX::BI__nvvm_atom_min_gen_i:
  case NVPTX::BI__nvvm_atom_min_gen_l:
  case NVPTX::BI__nvvm_atom_min_gen_ll:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);

  case NVPTX::BI__nvvm_atom_min_gen_ui:
  case NVPTX::BI__nvvm_atom_min_gen_ul:
  case NVPTX::BI__nvvm_atom_min_gen_ull:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E);

  case NVPTX::BI__nvvm_atom_cas_gen_i:
  case NVPTX::BI__nvvm_atom_cas_gen_l:
  case NVPTX::BI__nvvm_atom_cas_gen_ll:
    // __nvvm_atom_cas_gen_* should return the old value rather than the
    // success flag.
    return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false);

  case NVPTX::BI__nvvm_atom_add_gen_f:
  case NVPTX::BI__nvvm_atom_add_gen_d: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Val = EmitScalarExpr(E->getArg(1));
    return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::FAdd, Ptr, Val,
                                   AtomicOrdering::SequentiallyConsistent);
  }

  case NVPTX::BI__nvvm_atom_inc_gen_ui: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Val = EmitScalarExpr(E->getArg(1));
    Function *FnALI32 =
        CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType());
    return Builder.CreateCall(FnALI32, {Ptr, Val});
  }

  case NVPTX::BI__nvvm_atom_dec_gen_ui: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Val = EmitScalarExpr(E->getArg(1));
    Function *FnALD32 =
        CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType());
    return Builder.CreateCall(FnALD32, {Ptr, Val});
  }

  case NVPTX::BI__nvvm_ldg_c:
  case NVPTX::BI__nvvm_ldg_c2:
  case NVPTX::BI__nvvm_ldg_c4:
  case NVPTX::BI__nvvm_ldg_s:
  case NVPTX::BI__nvvm_ldg_s2:
  case NVPTX::BI__nvvm_ldg_s4:
  case NVPTX::BI__nvvm_ldg_i:
  case NVPTX::BI__nvvm_ldg_i2:
  case NVPTX::BI__nvvm_ldg_i4:
  case NVPTX::BI__nvvm_ldg_l:
  case NVPTX::BI__nvvm_ldg_ll:
  case NVPTX::BI__nvvm_ldg_ll2:
  case NVPTX::BI__nvvm_ldg_uc:
  case NVPTX::BI__nvvm_ldg_uc2:
  case NVPTX::BI__nvvm_ldg_uc4:
  case NVPTX::BI__nvvm_ldg_us:
  case NVPTX::BI__nvvm_ldg_us2:
  case NVPTX::BI__nvvm_ldg_us4:
  case NVPTX::BI__nvvm_ldg_ui:
  case NVPTX::BI__nvvm_ldg_ui2:
  case NVPTX::BI__nvvm_ldg_ui4:
  case NVPTX::BI__nvvm_ldg_ul:
  case NVPTX::BI__nvvm_ldg_ull:
  case NVPTX::BI__nvvm_ldg_ull2:
    // PTX Interoperability section 2.2: "For a vector with an even number of
    // elements, its alignment is set to number of elements times the alignment
    // of its member: n*alignof(t)."
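    // For example, an __nvvm_ldg_i4 load through an int4 * is emitted with
    // alignment 4 * alignof(int) rather than alignof(int).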
    return MakeLdg(Intrinsic::nvvm_ldg_global_i);
  case NVPTX::BI__nvvm_ldg_f:
  case NVPTX::BI__nvvm_ldg_f2:
  case NVPTX::BI__nvvm_ldg_f4:
  case NVPTX::BI__nvvm_ldg_d:
  case NVPTX::BI__nvvm_ldg_d2:
    return MakeLdg(Intrinsic::nvvm_ldg_global_f);

  case NVPTX::BI__nvvm_atom_cta_add_gen_i:
  case NVPTX::BI__nvvm_atom_cta_add_gen_l:
  case NVPTX::BI__nvvm_atom_cta_add_gen_ll:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta);
  case NVPTX::BI__nvvm_atom_sys_add_gen_i:
  case NVPTX::BI__nvvm_atom_sys_add_gen_l:
  case NVPTX::BI__nvvm_atom_sys_add_gen_ll:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys);
  case NVPTX::BI__nvvm_atom_cta_add_gen_f:
  case NVPTX::BI__nvvm_atom_cta_add_gen_d:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta);
  case NVPTX::BI__nvvm_atom_sys_add_gen_f:
  case NVPTX::BI__nvvm_atom_sys_add_gen_d:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys);
  case NVPTX::BI__nvvm_atom_cta_xchg_gen_i:
  case NVPTX::BI__nvvm_atom_cta_xchg_gen_l:
  case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta);
  case NVPTX::BI__nvvm_atom_sys_xchg_gen_i:
  case NVPTX::BI__nvvm_atom_sys_xchg_gen_l:
  case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys);
  case NVPTX::BI__nvvm_atom_cta_max_gen_i:
  case NVPTX::BI__nvvm_atom_cta_max_gen_ui:
  case NVPTX::BI__nvvm_atom_cta_max_gen_l:
  case NVPTX::BI__nvvm_atom_cta_max_gen_ul:
  case NVPTX::BI__nvvm_atom_cta_max_gen_ll:
  case NVPTX::BI__nvvm_atom_cta_max_gen_ull:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta);
  case NVPTX::BI__nvvm_atom_sys_max_gen_i:
  case NVPTX::BI__nvvm_atom_sys_max_gen_ui:
  case NVPTX::BI__nvvm_atom_sys_max_gen_l:
  case NVPTX::BI__nvvm_atom_sys_max_gen_ul:
  case NVPTX::BI__nvvm_atom_sys_max_gen_ll:
  case NVPTX::BI__nvvm_atom_sys_max_gen_ull:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys);
  case NVPTX::BI__nvvm_atom_cta_min_gen_i:
  case NVPTX::BI__nvvm_atom_cta_min_gen_ui:
  case NVPTX::BI__nvvm_atom_cta_min_gen_l:
  case NVPTX::BI__nvvm_atom_cta_min_gen_ul:
  case NVPTX::BI__nvvm_atom_cta_min_gen_ll:
  case NVPTX::BI__nvvm_atom_cta_min_gen_ull:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta);
  case NVPTX::BI__nvvm_atom_sys_min_gen_i:
  case NVPTX::BI__nvvm_atom_sys_min_gen_ui:
  case NVPTX::BI__nvvm_atom_sys_min_gen_l:
  case NVPTX::BI__nvvm_atom_sys_min_gen_ul:
  case NVPTX::BI__nvvm_atom_sys_min_gen_ll:
  case NVPTX::BI__nvvm_atom_sys_min_gen_ull:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys);
  case NVPTX::BI__nvvm_atom_cta_inc_gen_ui:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta);
  case NVPTX::BI__nvvm_atom_cta_dec_gen_ui:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta);
  case NVPTX::BI__nvvm_atom_sys_inc_gen_ui:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys);
  case NVPTX::BI__nvvm_atom_sys_dec_gen_ui:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys);
  case NVPTX::BI__nvvm_atom_cta_and_gen_i:
  case NVPTX::BI__nvvm_atom_cta_and_gen_l:
  case NVPTX::BI__nvvm_atom_cta_and_gen_ll:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta);
  case NVPTX::BI__nvvm_atom_sys_and_gen_i:
  case NVPTX::BI__nvvm_atom_sys_and_gen_l:
  case NVPTX::BI__nvvm_atom_sys_and_gen_ll:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys);
  case NVPTX::BI__nvvm_atom_cta_or_gen_i:
  case NVPTX::BI__nvvm_atom_cta_or_gen_l:
  case NVPTX::BI__nvvm_atom_cta_or_gen_ll:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta);
  case NVPTX::BI__nvvm_atom_sys_or_gen_i:
  case NVPTX::BI__nvvm_atom_sys_or_gen_l:
  case NVPTX::BI__nvvm_atom_sys_or_gen_ll:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys);
  case NVPTX::BI__nvvm_atom_cta_xor_gen_i:
  case NVPTX::BI__nvvm_atom_cta_xor_gen_l:
  case NVPTX::BI__nvvm_atom_cta_xor_gen_ll:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta);
  case NVPTX::BI__nvvm_atom_sys_xor_gen_i:
  case NVPTX::BI__nvvm_atom_sys_xor_gen_l:
  case NVPTX::BI__nvvm_atom_sys_xor_gen_ll:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys);
  case NVPTX::BI__nvvm_atom_cta_cas_gen_i:
  case NVPTX::BI__nvvm_atom_cta_cas_gen_l:
  case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(
        CGM.getIntrinsic(
            Intrinsic::nvvm_atomic_cas_gen_i_cta,
            {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
        {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
  }
  case NVPTX::BI__nvvm_atom_sys_cas_gen_i:
  case NVPTX::BI__nvvm_atom_sys_cas_gen_l:
  case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(
        CGM.getIntrinsic(
            Intrinsic::nvvm_atomic_cas_gen_i_sys,
            {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
        {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
  }
  case NVPTX::BI__nvvm_match_all_sync_i32p:
  case NVPTX::BI__nvvm_match_all_sync_i64p: {
    Value *Mask = EmitScalarExpr(E->getArg(0));
    Value *Val = EmitScalarExpr(E->getArg(1));
    Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2));
    Value *ResultPair = Builder.CreateCall(
        CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p
                             ? Intrinsic::nvvm_match_all_sync_i32p
                             : Intrinsic::nvvm_match_all_sync_i64p),
        {Mask, Val});
    Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1),
                                     PredOutPtr.getElementType());
    Builder.CreateStore(Pred, PredOutPtr);
    return Builder.CreateExtractValue(ResultPair, 0);
  }

  // FP MMA loads
  case NVPTX::BI__hmma_m16n16k16_ld_a:
  case NVPTX::BI__hmma_m16n16k16_ld_b:
  case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
  case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
  case NVPTX::BI__hmma_m32n8k16_ld_a:
  case NVPTX::BI__hmma_m32n8k16_ld_b:
  case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
  case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
  case NVPTX::BI__hmma_m8n32k16_ld_a:
  case NVPTX::BI__hmma_m8n32k16_ld_b:
  case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
  case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
  // Integer MMA loads.
  case NVPTX::BI__imma_m16n16k16_ld_a_s8:
  case NVPTX::BI__imma_m16n16k16_ld_a_u8:
  case NVPTX::BI__imma_m16n16k16_ld_b_s8:
  case NVPTX::BI__imma_m16n16k16_ld_b_u8:
  case NVPTX::BI__imma_m16n16k16_ld_c:
  case NVPTX::BI__imma_m32n8k16_ld_a_s8:
  case NVPTX::BI__imma_m32n8k16_ld_a_u8:
  case NVPTX::BI__imma_m32n8k16_ld_b_s8:
  case NVPTX::BI__imma_m32n8k16_ld_b_u8:
  case NVPTX::BI__imma_m32n8k16_ld_c:
  case NVPTX::BI__imma_m8n32k16_ld_a_s8:
  case NVPTX::BI__imma_m8n32k16_ld_a_u8:
  case NVPTX::BI__imma_m8n32k16_ld_b_s8:
  case NVPTX::BI__imma_m8n32k16_ld_b_u8:
  case NVPTX::BI__imma_m8n32k16_ld_c:
  // Sub-integer MMA loads.
  case NVPTX::BI__imma_m8n8k32_ld_a_s4:
  case NVPTX::BI__imma_m8n8k32_ld_a_u4:
  case NVPTX::BI__imma_m8n8k32_ld_b_s4:
  case NVPTX::BI__imma_m8n8k32_ld_b_u4:
  case NVPTX::BI__imma_m8n8k32_ld_c:
  case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
  case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
  case NVPTX::BI__bmma_m8n8k128_ld_c: {
    Address Dst = EmitPointerWithAlignment(E->getArg(0));
    Value *Src = EmitScalarExpr(E->getArg(1));
    Value *Ldm = EmitScalarExpr(E->getArg(2));
    Optional<llvm::APSInt> isColMajorArg =
        E->getArg(3)->getIntegerConstantExpr(getContext());
    if (!isColMajorArg)
      return nullptr;
    bool isColMajor = isColMajorArg->getSExtValue();
    NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
    unsigned IID = isColMajor ? II.IID_col : II.IID_row;
    if (IID == 0)
      return nullptr;

    Value *Result =
        Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm});

    // Save returned values.
    assert(II.NumResults);
    if (II.NumResults == 1) {
      Builder.CreateAlignedStore(Result, Dst.getPointer(),
                                 CharUnits::fromQuantity(4));
    } else {
      for (unsigned i = 0; i < II.NumResults; ++i) {
        Builder.CreateAlignedStore(
            Builder.CreateBitCast(Builder.CreateExtractValue(Result, i),
                                  Dst.getElementType()),
            Builder.CreateGEP(Dst.getPointer(),
                              llvm::ConstantInt::get(IntTy, i)),
            CharUnits::fromQuantity(4));
      }
    }
    return Result;
  }

  case NVPTX::BI__hmma_m16n16k16_st_c_f16:
  case NVPTX::BI__hmma_m16n16k16_st_c_f32:
  case NVPTX::BI__hmma_m32n8k16_st_c_f16:
  case NVPTX::BI__hmma_m32n8k16_st_c_f32:
  case NVPTX::BI__hmma_m8n32k16_st_c_f16:
  case NVPTX::BI__hmma_m8n32k16_st_c_f32:
  case NVPTX::BI__imma_m16n16k16_st_c_i32:
  case NVPTX::BI__imma_m32n8k16_st_c_i32:
  case NVPTX::BI__imma_m8n32k16_st_c_i32:
  case NVPTX::BI__imma_m8n8k32_st_c_i32:
  case NVPTX::BI__bmma_m8n8k128_st_c_i32: {
    Value *Dst = EmitScalarExpr(E->getArg(0));
    Address Src = EmitPointerWithAlignment(E->getArg(1));
    Value *Ldm = EmitScalarExpr(E->getArg(2));
    Optional<llvm::APSInt> isColMajorArg =
        E->getArg(3)->getIntegerConstantExpr(getContext());
    if (!isColMajorArg)
      return nullptr;
    bool isColMajor = isColMajorArg->getSExtValue();
    NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
    unsigned IID = isColMajor ? II.IID_col : II.IID_row;
    if (IID == 0)
      return nullptr;
    Function *Intrinsic =
        CGM.getIntrinsic(IID, Dst->getType());
    llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1);
    SmallVector<Value *, 10> Values = {Dst};
    for (unsigned i = 0; i < II.NumResults; ++i) {
      Value *V = Builder.CreateAlignedLoad(
          Builder.CreateGEP(Src.getPointer(), llvm::ConstantInt::get(IntTy, i)),
          CharUnits::fromQuantity(4));
      Values.push_back(Builder.CreateBitCast(V, ParamType));
    }
    Values.push_back(Ldm);
    Value *Result = Builder.CreateCall(Intrinsic, Values);
    return Result;
  }

  // BI__hmma_m16n16k16_mma_<DType><CType>(d, a, b, c, layout, satf) -->
  // Intrinsic::nvvm_wmma_m16n16k16_mma_sync<layout A,B><DType><CType><Satf>
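  // For example, with layout == 1 (row/col) and satf == 0,
  // __hmma_m16n16k16_mma_f32f16 selects
  // Intrinsic::nvvm_wmma_m16n16k16_mma_row_col_f32_f16.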
  case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
  case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
  case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
  case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
  case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
  case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
  case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
  case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
  case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
  case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
  case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
  case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
  case NVPTX::BI__imma_m16n16k16_mma_s8:
  case NVPTX::BI__imma_m16n16k16_mma_u8:
  case NVPTX::BI__imma_m32n8k16_mma_s8:
  case NVPTX::BI__imma_m32n8k16_mma_u8:
  case NVPTX::BI__imma_m8n32k16_mma_s8:
  case NVPTX::BI__imma_m8n32k16_mma_u8:
  case NVPTX::BI__imma_m8n8k32_mma_s4:
  case NVPTX::BI__imma_m8n8k32_mma_u4:
  case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1: {
    Address Dst = EmitPointerWithAlignment(E->getArg(0));
    Address SrcA = EmitPointerWithAlignment(E->getArg(1));
    Address SrcB = EmitPointerWithAlignment(E->getArg(2));
    Address SrcC = EmitPointerWithAlignment(E->getArg(3));
    Optional<llvm::APSInt> LayoutArg =
        E->getArg(4)->getIntegerConstantExpr(getContext());
    if (!LayoutArg)
      return nullptr;
    int Layout = LayoutArg->getSExtValue();
    if (Layout < 0 || Layout > 3)
      return nullptr;
    llvm::APSInt SatfArg;
    if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1)
      SatfArg = 0; // .b1 does not have satf argument.
    else if (Optional<llvm::APSInt> OptSatfArg =
                 E->getArg(5)->getIntegerConstantExpr(getContext()))
      SatfArg = *OptSatfArg;
    else
      return nullptr;
    bool Satf = SatfArg.getSExtValue();
    NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID);
    unsigned IID = MI.getMMAIntrinsic(Layout, Satf);
    if (IID == 0) // Unsupported combination of Layout/Satf.
      return nullptr;

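    // Fragments arrive as pointers; unpack each one into the individual
    // scalar arguments the intrinsic expects, bitcasting each 4-byte element
    // to the corresponding parameter type.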
    SmallVector<Value *, 24> Values;
    Function *Intrinsic = CGM.getIntrinsic(IID);
    llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(0);
    // Load A
    for (unsigned i = 0; i < MI.NumEltsA; ++i) {
      Value *V = Builder.CreateAlignedLoad(
          Builder.CreateGEP(SrcA.getPointer(),
                            llvm::ConstantInt::get(IntTy, i)),
          CharUnits::fromQuantity(4));
      Values.push_back(Builder.CreateBitCast(V, AType));
    }
    // Load B
    llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA);
    for (unsigned i = 0; i < MI.NumEltsB; ++i) {
      Value *V = Builder.CreateAlignedLoad(
          Builder.CreateGEP(SrcB.getPointer(),
                            llvm::ConstantInt::get(IntTy, i)),
          CharUnits::fromQuantity(4));
      Values.push_back(Builder.CreateBitCast(V, BType));
    }
    // Load C
    llvm::Type *CType =
        Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB);
    for (unsigned i = 0; i < MI.NumEltsC; ++i) {
      Value *V = Builder.CreateAlignedLoad(
          Builder.CreateGEP(SrcC.getPointer(),
                            llvm::ConstantInt::get(IntTy, i)),
          CharUnits::fromQuantity(4));
      Values.push_back(Builder.CreateBitCast(V, CType));
    }
    Value *Result = Builder.CreateCall(Intrinsic, Values);
    llvm::Type *DType = Dst.getElementType();
    for (unsigned i = 0; i < MI.NumEltsD; ++i)
      Builder.CreateAlignedStore(
          Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType),
          Builder.CreateGEP(Dst.getPointer(), llvm::ConstantInt::get(IntTy, i)),
          CharUnits::fromQuantity(4));
    return Result;
  }
  default:
    return nullptr;
  }
}

namespace {
struct BuiltinAlignArgs {
  llvm::Value *Src = nullptr;
  llvm::Type *SrcType = nullptr;
  llvm::Value *Alignment = nullptr;
  llvm::Value *Mask = nullptr;
  llvm::IntegerType *IntType = nullptr;

  BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
    QualType AstType = E->getArg(0)->getType();
    if (AstType->isArrayType())
      Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).getPointer();
    else
      Src = CGF.EmitScalarExpr(E->getArg(0));
    SrcType = Src->getType();
    if (SrcType->isPointerTy()) {
      IntType = IntegerType::get(
          CGF.getLLVMContext(),
          CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType));
    } else {
      assert(SrcType->isIntegerTy());
      IntType = cast<llvm::IntegerType>(SrcType);
    }
    Alignment = CGF.EmitScalarExpr(E->getArg(1));
    Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment");
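    // The builtins require Alignment to be a power of two, so Alignment - 1
    // yields a mask of exactly the low bits that must be clear.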
    auto *One = llvm::ConstantInt::get(IntType, 1);
    Mask = CGF.Builder.CreateSub(Alignment, One, "mask");
  }
};
} // namespace

/// Generate (x & (y-1)) == 0.
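/// For example, __builtin_is_aligned(p, 16) is emitted as the equivalent of
/// ((uintptr_t)p & 15) == 0.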
RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) {
  BuiltinAlignArgs Args(E, *this);
  llvm::Value *SrcAddress = Args.Src;
  if (Args.SrcType->isPointerTy())
    SrcAddress =
        Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr");
  return RValue::get(Builder.CreateICmpEQ(
      Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"),
      llvm::Constant::getNullValue(Args.IntType), "is_aligned"));
}

/// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
/// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
/// llvm.ptrmask intrinsic (preceded by a GEP in the align_up case).
/// TODO: actually use ptrmask once most optimization passes know about it.
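/// For example, __builtin_align_down(x, 32) yields x & ~31, and
/// __builtin_align_up(x, 32) yields (x + 31) & ~31.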
RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
  BuiltinAlignArgs Args(E, *this);
  llvm::Value *SrcAddr = Args.Src;
  if (Args.Src->getType()->isPointerTy())
    SrcAddr = Builder.CreatePtrToInt(Args.Src, Args.IntType, "intptr");
  llvm::Value *SrcForMask = SrcAddr;
  if (AlignUp) {
    // When aligning up we have to first add the mask to ensure we go over the
    // next alignment value and then align down to the next valid multiple.
    // By adding the mask, we ensure that align_up on an already aligned
    // value will not change the value.
    SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
  }
  // Invert the mask to only clear the lower bits.
  llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask");
  llvm::Value *Result =
      Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
  if (Args.Src->getType()->isPointerTy()) {
    /// TODO: Use ptrmask instead of ptrtoint+gep once it is optimized well.
    // Result = Builder.CreateIntrinsic(
    //     Intrinsic::ptrmask, {Args.SrcType, SrcForMask->getType(), Args.IntType},
    //     {SrcForMask, InvertedMask}, nullptr, "aligned_result");
    Result->setName("aligned_intptr");
    llvm::Value *Difference = Builder.CreateSub(Result, SrcAddr, "diff");
    // The result must point to the same underlying allocation. This means we
    // can use an inbounds GEP to enable better optimization.
    Value *Base = EmitCastToVoidPtr(Args.Src);
    if (getLangOpts().isSignedOverflowDefined())
      Result = Builder.CreateGEP(Base, Difference, "aligned_result");
    else
      Result = EmitCheckedInBoundsGEP(Base, Difference,
                                      /*SignedIndices=*/true,
                                      /*isSubtraction=*/!AlignUp,
                                      E->getExprLoc(), "aligned_result");
    Result = Builder.CreatePointerCast(Result, Args.SrcType);
    // Emit an alignment assumption to ensure that the new alignment is
    // propagated to loads/stores, etc.
    emitAlignmentAssumption(Result, E, E->getExprLoc(), Args.Alignment);
  }
  assert(Result->getType() == Args.SrcType);
  return RValue::get(Result);
}

Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
                                                   const CallExpr *E) {
  switch (BuiltinID) {
  case WebAssembly::BI__builtin_wasm_memory_size: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *I = EmitScalarExpr(E->getArg(0));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType);
    return Builder.CreateCall(Callee, I);
  }
  case WebAssembly::BI__builtin_wasm_memory_grow: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Args[] = {EmitScalarExpr(E->getArg(0)),
                     EmitScalarExpr(E->getArg(1))};
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
    return Builder.CreateCall(Callee, Args);
  }
  case WebAssembly::BI__builtin_wasm_tls_size: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_size, ResultType);
    return Builder.CreateCall(Callee);
  }
  case WebAssembly::BI__builtin_wasm_tls_align: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_align, ResultType);
    return Builder.CreateCall(Callee);
  }
  case WebAssembly::BI__builtin_wasm_tls_base: {
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_base);
    return Builder.CreateCall(Callee);
  }
  case WebAssembly::BI__builtin_wasm_throw: {
    Value *Tag = EmitScalarExpr(E->getArg(0));
    Value *Obj = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw);
    return Builder.CreateCall(Callee, {Tag, Obj});
  }
  case WebAssembly::BI__builtin_wasm_rethrow: {
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow);
    return Builder.CreateCall(Callee);
  }
  case WebAssembly::BI__builtin_wasm_memory_atomic_wait32: {
    Value *Addr = EmitScalarExpr(E->getArg(0));
    Value *Expected = EmitScalarExpr(E->getArg(1));
    Value *Timeout = EmitScalarExpr(E->getArg(2));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait32);
    return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
  }
  case WebAssembly::BI__builtin_wasm_memory_atomic_wait64: {
    Value *Addr = EmitScalarExpr(E->getArg(0));
    Value *Expected = EmitScalarExpr(E->getArg(1));
    Value *Timeout = EmitScalarExpr(E->getArg(2));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait64);
    return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
  }
  case WebAssembly::BI__builtin_wasm_memory_atomic_notify: {
    Value *Addr = EmitScalarExpr(E->getArg(0));
    Value *Count = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_notify);
    return Builder.CreateCall(Callee, {Addr, Count});
  }
  case WebAssembly::BI__builtin_wasm_trunc_s_i32_f32:
  case WebAssembly::BI__builtin_wasm_trunc_s_i32_f64:
  case WebAssembly::BI__builtin_wasm_trunc_s_i64_f32:
  case WebAssembly::BI__builtin_wasm_trunc_s_i64_f64: {
    Value *Src = EmitScalarExpr(E->getArg(0));
    llvm::Type *ResT = ConvertType(E->getType());
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_trunc_signed, {ResT, Src->getType()});
    return Builder.CreateCall(Callee, {Src});
  }
  case WebAssembly::BI__builtin_wasm_trunc_u_i32_f32:
  case WebAssembly::BI__builtin_wasm_trunc_u_i32_f64:
  case WebAssembly::BI__builtin_wasm_trunc_u_i64_f32:
  case WebAssembly::BI__builtin_wasm_trunc_u_i64_f64: {
    Value *Src = EmitScalarExpr(E->getArg(0));
    llvm::Type *ResT = ConvertType(E->getType());
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_unsigned,
                                        {ResT, Src->getType()});
    return Builder.CreateCall(Callee, {Src});
  }
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f32:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4: {
    Value *Src = EmitScalarExpr(E->getArg(0));
    llvm::Type *ResT = ConvertType(E->getType());
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_signed,
                                        {ResT, Src->getType()});
    return Builder.CreateCall(Callee, {Src});
  }
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4: {
    Value *Src = EmitScalarExpr(E->getArg(0));
    llvm::Type *ResT = ConvertType(E->getType());
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_unsigned,
                                        {ResT, Src->getType()});
    return Builder.CreateCall(Callee, {Src});
  }
  case WebAssembly::BI__builtin_wasm_min_f32:
  case WebAssembly::BI__builtin_wasm_min_f64:
  case WebAssembly::BI__builtin_wasm_min_f32x4:
  case WebAssembly::BI__builtin_wasm_min_f64x2: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::minimum, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_max_f32:
  case WebAssembly::BI__builtin_wasm_max_f64:
  case WebAssembly::BI__builtin_wasm_max_f32x4:
  case WebAssembly::BI__builtin_wasm_max_f64x2: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::maximum, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_pmin_f32x4:
  case WebAssembly::BI__builtin_wasm_pmin_f64x2: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_pmin, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_pmax_f32x4:
  case WebAssembly::BI__builtin_wasm_pmax_f64x2: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_pmax, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_ceil_f32x4:
  case WebAssembly::BI__builtin_wasm_floor_f32x4:
  case WebAssembly::BI__builtin_wasm_trunc_f32x4:
  case WebAssembly::BI__builtin_wasm_nearest_f32x4:
  case WebAssembly::BI__builtin_wasm_ceil_f64x2:
  case WebAssembly::BI__builtin_wasm_floor_f64x2:
  case WebAssembly::BI__builtin_wasm_trunc_f64x2:
  case WebAssembly::BI__builtin_wasm_nearest_f64x2: {
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_ceil_f32x4:
    case WebAssembly::BI__builtin_wasm_ceil_f64x2:
      IntNo = Intrinsic::wasm_ceil;
      break;
    case WebAssembly::BI__builtin_wasm_floor_f32x4:
    case WebAssembly::BI__builtin_wasm_floor_f64x2:
      IntNo = Intrinsic::wasm_floor;
      break;
    case WebAssembly::BI__builtin_wasm_trunc_f32x4:
    case WebAssembly::BI__builtin_wasm_trunc_f64x2:
      IntNo = Intrinsic::wasm_trunc;
      break;
    case WebAssembly::BI__builtin_wasm_nearest_f32x4:
    case WebAssembly::BI__builtin_wasm_nearest_f64x2:
      IntNo = Intrinsic::wasm_nearest;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Value *Value = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, Value);
  }
  case WebAssembly::BI__builtin_wasm_swizzle_v8x16: {
    Value *Src = EmitScalarExpr(E->getArg(0));
    Value *Indices = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_swizzle);
    return Builder.CreateCall(Callee, {Src, Indices});
  }
  case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
  case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
  case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
  case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
  case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
  case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
  case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
  case WebAssembly::BI__builtin_wasm_extract_lane_f64x2: {
    llvm::APSInt LaneConst =
        *E->getArg(1)->getIntegerConstantExpr(getContext());
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
    Value *Extract = Builder.CreateExtractElement(Vec, Lane);
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
    case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
      return Builder.CreateSExt(Extract, ConvertType(E->getType()));
    case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
    case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
      return Builder.CreateZExt(Extract, ConvertType(E->getType()));
    case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
    case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
    case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
    case WebAssembly::BI__builtin_wasm_extract_lane_f64x2:
      return Extract;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
  }
  case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
  case WebAssembly::BI__builtin_wasm_replace_lane_i16x8:
  case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
  case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
  case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
  case WebAssembly::BI__builtin_wasm_replace_lane_f64x2: {
    llvm::APSInt LaneConst =
        *E->getArg(1)->getIntegerConstantExpr(getContext());
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
    Value *Val = EmitScalarExpr(E->getArg(2));
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
    case WebAssembly::BI__builtin_wasm_replace_lane_i16x8: {
      llvm::Type *ElemType =
          cast<llvm::VectorType>(ConvertType(E->getType()))->getElementType();
      Value *Trunc = Builder.CreateTrunc(Val, ElemType);
      return Builder.CreateInsertElement(Vec, Trunc, Lane);
    }
    case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
    case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
    case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
    case WebAssembly::BI__builtin_wasm_replace_lane_f64x2:
      return Builder.CreateInsertElement(Vec, Val, Lane);
    default:
      llvm_unreachable("unexpected builtin ID");
    }
  }
  case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
  case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
  case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
  case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
  case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
  case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
  case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
  case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8: {
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
    case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
      IntNo = Intrinsic::sadd_sat;
      break;
    case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
    case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
      IntNo = Intrinsic::uadd_sat;
      break;
    case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
    case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
      IntNo = Intrinsic::wasm_sub_saturate_signed;
      break;
    case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
    case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8:
      IntNo = Intrinsic::wasm_sub_saturate_unsigned;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_abs_i8x16:
  case WebAssembly::BI__builtin_wasm_abs_i16x8:
  case WebAssembly::BI__builtin_wasm_abs_i32x4: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
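    // Lanewise abs(x), emitted as select(x < 0, -x, x).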
    Value *Neg = Builder.CreateNeg(Vec, "neg");
    Constant *Zero = llvm::Constant::getNullValue(Vec->getType());
    Value *ICmp = Builder.CreateICmpSLT(Vec, Zero, "abscond");
    return Builder.CreateSelect(ICmp, Neg, Vec, "abs");
  }
  case WebAssembly::BI__builtin_wasm_min_s_i8x16:
  case WebAssembly::BI__builtin_wasm_min_u_i8x16:
  case WebAssembly::BI__builtin_wasm_max_s_i8x16:
  case WebAssembly::BI__builtin_wasm_max_u_i8x16:
  case WebAssembly::BI__builtin_wasm_min_s_i16x8:
  case WebAssembly::BI__builtin_wasm_min_u_i16x8:
  case WebAssembly::BI__builtin_wasm_max_s_i16x8:
  case WebAssembly::BI__builtin_wasm_max_u_i16x8:
  case WebAssembly::BI__builtin_wasm_min_s_i32x4:
  case WebAssembly::BI__builtin_wasm_min_u_i32x4:
  case WebAssembly::BI__builtin_wasm_max_s_i32x4:
  case WebAssembly::BI__builtin_wasm_max_u_i32x4: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
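    // Lanewise integer min/max, emitted as select(cmp(LHS, RHS), LHS, RHS)
    // with the comparison chosen by signedness and direction.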
    Value *ICmp;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_min_s_i8x16:
    case WebAssembly::BI__builtin_wasm_min_s_i16x8:
    case WebAssembly::BI__builtin_wasm_min_s_i32x4:
      ICmp = Builder.CreateICmpSLT(LHS, RHS);
      break;
    case WebAssembly::BI__builtin_wasm_min_u_i8x16:
    case WebAssembly::BI__builtin_wasm_min_u_i16x8:
    case WebAssembly::BI__builtin_wasm_min_u_i32x4:
      ICmp = Builder.CreateICmpULT(LHS, RHS);
      break;
    case WebAssembly::BI__builtin_wasm_max_s_i8x16:
    case WebAssembly::BI__builtin_wasm_max_s_i16x8:
    case WebAssembly::BI__builtin_wasm_max_s_i32x4:
      ICmp = Builder.CreateICmpSGT(LHS, RHS);
      break;
    case WebAssembly::BI__builtin_wasm_max_u_i8x16:
    case WebAssembly::BI__builtin_wasm_max_u_i16x8:
    case WebAssembly::BI__builtin_wasm_max_u_i32x4:
      ICmp = Builder.CreateICmpUGT(LHS, RHS);
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    return Builder.CreateSelect(ICmp, LHS, RHS);
  }
  case WebAssembly::BI__builtin_wasm_avgr_u_i8x16:
  case WebAssembly::BI__builtin_wasm_avgr_u_i16x8: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_avgr_unsigned,
                                        ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_q15mulr_saturate_s_i16x8: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_q15mulr_saturate_signed);
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_extmul_low_i8x16_s_i16x8:
  case WebAssembly::BI__builtin_wasm_extmul_high_i8x16_s_i16x8:
  case WebAssembly::BI__builtin_wasm_extmul_low_i8x16_u_i16x8:
  case WebAssembly::BI__builtin_wasm_extmul_high_i8x16_u_i16x8:
  case WebAssembly::BI__builtin_wasm_extmul_low_i16x8_s_i32x4:
  case WebAssembly::BI__builtin_wasm_extmul_high_i16x8_s_i32x4:
  case WebAssembly::BI__builtin_wasm_extmul_low_i16x8_u_i32x4:
  case WebAssembly::BI__builtin_wasm_extmul_high_i16x8_u_i32x4:
  case WebAssembly::BI__builtin_wasm_extmul_low_i32x4_s_i64x2:
  case WebAssembly::BI__builtin_wasm_extmul_high_i32x4_s_i64x2:
  case WebAssembly::BI__builtin_wasm_extmul_low_i32x4_u_i64x2:
  case WebAssembly::BI__builtin_wasm_extmul_high_i32x4_u_i64x2: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_extmul_low_i8x16_s_i16x8:
    case WebAssembly::BI__builtin_wasm_extmul_low_i16x8_s_i32x4:
    case WebAssembly::BI__builtin_wasm_extmul_low_i32x4_s_i64x2:
      IntNo = Intrinsic::wasm_extmul_low_signed;
      break;
    case WebAssembly::BI__builtin_wasm_extmul_low_i8x16_u_i16x8:
    case WebAssembly::BI__builtin_wasm_extmul_low_i16x8_u_i32x4:
    case WebAssembly::BI__builtin_wasm_extmul_low_i32x4_u_i64x2:
      IntNo = Intrinsic::wasm_extmul_low_unsigned;
      break;
    case WebAssembly::BI__builtin_wasm_extmul_high_i8x16_s_i16x8:
    case WebAssembly::BI__builtin_wasm_extmul_high_i16x8_s_i32x4:
    case WebAssembly::BI__builtin_wasm_extmul_high_i32x4_s_i64x2:
      IntNo = Intrinsic::wasm_extmul_high_signed;
      break;
    case WebAssembly::BI__builtin_wasm_extmul_high_i8x16_u_i16x8:
    case WebAssembly::BI__builtin_wasm_extmul_high_i16x8_u_i32x4:
    case WebAssembly::BI__builtin_wasm_extmul_high_i32x4_u_i64x2:
      IntNo = Intrinsic::wasm_extmul_high_unsigned;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }

    Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8:
  case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8:
  case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4:
  case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8:
    case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4:
      IntNo = Intrinsic::wasm_extadd_pairwise_signed;
      break;
    case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8:
    case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4:
      IntNo = Intrinsic::wasm_extadd_pairwise_unsigned;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }

    Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, Vec);
  }
  case WebAssembly::BI__builtin_wasm_bitselect: {
    Value *V1 = EmitScalarExpr(E->getArg(0));
    Value *V2 = EmitScalarExpr(E->getArg(1));
    Value *C = EmitScalarExpr(E->getArg(2));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_bitselect, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {V1, V2, C});
  }
  case WebAssembly::BI__builtin_wasm_signselect_i8x16:
  case WebAssembly::BI__builtin_wasm_signselect_i16x8:
  case WebAssembly::BI__builtin_wasm_signselect_i32x4:
  case WebAssembly::BI__builtin_wasm_signselect_i64x2: {
    Value *V1 = EmitScalarExpr(E->getArg(0));
    Value *V2 = EmitScalarExpr(E->getArg(1));
    Value *C = EmitScalarExpr(E->getArg(2));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_signselect,
                                        ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {V1, V2, C});
  }
  case WebAssembly::BI__builtin_wasm_dot_s_i32x4_i16x8: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_dot);
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_popcnt_i8x16: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_popcnt);
    return Builder.CreateCall(Callee, {Vec});
  }
  case WebAssembly::BI__builtin_wasm_eq_i64x2: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_eq);
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_any_true_i8x16:
  case WebAssembly::BI__builtin_wasm_any_true_i16x8:
  case WebAssembly::BI__builtin_wasm_any_true_i32x4:
  case WebAssembly::BI__builtin_wasm_any_true_i64x2:
  case WebAssembly::BI__builtin_wasm_all_true_i8x16:
  case WebAssembly::BI__builtin_wasm_all_true_i16x8:
  case WebAssembly::BI__builtin_wasm_all_true_i32x4:
  case WebAssembly::BI__builtin_wasm_all_true_i64x2: {
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_any_true_i8x16:
    case WebAssembly::BI__builtin_wasm_any_true_i16x8:
    case WebAssembly::BI__builtin_wasm_any_true_i32x4:
    case WebAssembly::BI__builtin_wasm_any_true_i64x2:
      IntNo = Intrinsic::wasm_anytrue;
      break;
    case WebAssembly::BI__builtin_wasm_all_true_i8x16:
    case WebAssembly::BI__builtin_wasm_all_true_i16x8:
    case WebAssembly::BI__builtin_wasm_all_true_i32x4:
    case WebAssembly::BI__builtin_wasm_all_true_i64x2:
      IntNo = Intrinsic::wasm_alltrue;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(IntNo, Vec->getType());
    return Builder.CreateCall(Callee, {Vec});
  }
  case WebAssembly::BI__builtin_wasm_bitmask_i8x16:
  case WebAssembly::BI__builtin_wasm_bitmask_i16x8:
  case WebAssembly::BI__builtin_wasm_bitmask_i32x4:
  case WebAssembly::BI__builtin_wasm_bitmask_i64x2: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_bitmask, Vec->getType());
    return Builder.CreateCall(Callee, {Vec});
  }
  case WebAssembly::BI__builtin_wasm_abs_f32x4:
  case WebAssembly::BI__builtin_wasm_abs_f64x2: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType());
    return Builder.CreateCall(Callee, {Vec});
  }
  case WebAssembly::BI__builtin_wasm_sqrt_f32x4:
  case WebAssembly::BI__builtin_wasm_sqrt_f64x2: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType());
    return Builder.CreateCall(Callee, {Vec});
  }
  case WebAssembly::BI__builtin_wasm_qfma_f32x4:
  case WebAssembly::BI__builtin_wasm_qfms_f32x4:
  case WebAssembly::BI__builtin_wasm_qfma_f64x2:
  case WebAssembly::BI__builtin_wasm_qfms_f64x2: {
    Value *A = EmitScalarExpr(E->getArg(0));
    Value *B = EmitScalarExpr(E->getArg(1));
    Value *C = EmitScalarExpr(E->getArg(2));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_qfma_f32x4:
    case WebAssembly::BI__builtin_wasm_qfma_f64x2:
      IntNo = Intrinsic::wasm_qfma;
      break;
    case WebAssembly::BI__builtin_wasm_qfms_f32x4:
    case WebAssembly::BI__builtin_wasm_qfms_f64x2:
      IntNo = Intrinsic::wasm_qfms;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Function *Callee = CGM.getIntrinsic(IntNo, A->getType());
    return Builder.CreateCall(Callee, {A, B, C});
  }
  case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
  case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
  case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
  case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4: {
    Value *Low = EmitScalarExpr(E->getArg(0));
    Value *High = EmitScalarExpr(E->getArg(1));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
    case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
      IntNo = Intrinsic::wasm_narrow_signed;
      break;
    case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
    case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4:
      IntNo = Intrinsic::wasm_narrow_unsigned;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Function *Callee =
        CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Low->getType()});
    return Builder.CreateCall(Callee, {Low, High});
  }
  case WebAssembly::BI__builtin_wasm_widen_low_s_i32x4_i64x2:
  case WebAssembly::BI__builtin_wasm_widen_high_s_i32x4_i64x2:
  case WebAssembly::BI__builtin_wasm_widen_low_u_i32x4_i64x2:
  case WebAssembly::BI__builtin_wasm_widen_high_u_i32x4_i64x2: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_widen_low_s_i32x4_i64x2:
      IntNo = Intrinsic::wasm_widen_low_signed;
      break;
    case WebAssembly::BI__builtin_wasm_widen_high_s_i32x4_i64x2:
      IntNo = Intrinsic::wasm_widen_high_signed;
      break;
    case WebAssembly::BI__builtin_wasm_widen_low_u_i32x4_i64x2:
      IntNo = Intrinsic::wasm_widen_low_unsigned;
      break;
    case WebAssembly::BI__builtin_wasm_widen_high_u_i32x4_i64x2:
      IntNo = Intrinsic::wasm_widen_high_unsigned;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Function *Callee = CGM.getIntrinsic(IntNo);
    return Builder.CreateCall(Callee, Vec);
  }
  case WebAssembly::BI__builtin_wasm_convert_low_s_i32x4_f64x2:
  case WebAssembly::BI__builtin_wasm_convert_low_u_i32x4_f64x2: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_convert_low_s_i32x4_f64x2:
      IntNo = Intrinsic::wasm_convert_low_signed;
      break;
    case WebAssembly::BI__builtin_wasm_convert_low_u_i32x4_f64x2:
      IntNo = Intrinsic::wasm_convert_low_unsigned;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Function *Callee = CGM.getIntrinsic(IntNo);
    return Builder.CreateCall(Callee, Vec);
  }
  case WebAssembly::BI__builtin_wasm_trunc_saturate_zero_s_f64x2_i32x4:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_zero_u_f64x2_i32x4: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_trunc_saturate_zero_s_f64x2_i32x4:
      IntNo = Intrinsic::wasm_trunc_saturate_zero_signed;
      break;
    case WebAssembly::BI__builtin_wasm_trunc_saturate_zero_u_f64x2_i32x4:
      IntNo = Intrinsic::wasm_trunc_saturate_zero_unsigned;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
17252 Function *Callee = CGM.getIntrinsic(IntNo);
17253 return Builder.CreateCall(Callee, Vec);
17254 }
17255 case WebAssembly::BI__builtin_wasm_demote_zero_f64x2_f32x4: {
17256 Value *Vec = EmitScalarExpr(E->getArg(0));
17257 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_demote_zero);
17258 return Builder.CreateCall(Callee, Vec);
17259 }
17260 case WebAssembly::BI__builtin_wasm_promote_low_f32x4_f64x2: {
17261 Value *Vec = EmitScalarExpr(E->getArg(0));
17262 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_promote_low);
17263 return Builder.CreateCall(Callee, Vec);
17264 }
  case WebAssembly::BI__builtin_wasm_load32_zero: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_load32_zero);
    return Builder.CreateCall(Callee, {Ptr});
  }
  case WebAssembly::BI__builtin_wasm_load64_zero: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_load64_zero);
    return Builder.CreateCall(Callee, {Ptr});
  }
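  // The lane load/store builtins take (pointer, vector, lane index). The
  // lane index must be an integer constant expression, which is enforced
  // before CodeGen; the assertion below documents that invariant.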
  case WebAssembly::BI__builtin_wasm_load8_lane:
  case WebAssembly::BI__builtin_wasm_load16_lane:
  case WebAssembly::BI__builtin_wasm_load32_lane:
  case WebAssembly::BI__builtin_wasm_load64_lane:
  case WebAssembly::BI__builtin_wasm_store8_lane:
  case WebAssembly::BI__builtin_wasm_store16_lane:
  case WebAssembly::BI__builtin_wasm_store32_lane:
  case WebAssembly::BI__builtin_wasm_store64_lane: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Vec = EmitScalarExpr(E->getArg(1));
    Optional<llvm::APSInt> LaneIdxConst =
        E->getArg(2)->getIntegerConstantExpr(getContext());
    assert(LaneIdxConst && "Constant arg isn't actually constant?");
    Value *LaneIdx = llvm::ConstantInt::get(getLLVMContext(), *LaneIdxConst);
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_load8_lane:
      IntNo = Intrinsic::wasm_load8_lane;
      break;
    case WebAssembly::BI__builtin_wasm_load16_lane:
      IntNo = Intrinsic::wasm_load16_lane;
      break;
    case WebAssembly::BI__builtin_wasm_load32_lane:
      IntNo = Intrinsic::wasm_load32_lane;
      break;
    case WebAssembly::BI__builtin_wasm_load64_lane:
      IntNo = Intrinsic::wasm_load64_lane;
      break;
    case WebAssembly::BI__builtin_wasm_store8_lane:
      IntNo = Intrinsic::wasm_store8_lane;
      break;
    case WebAssembly::BI__builtin_wasm_store16_lane:
      IntNo = Intrinsic::wasm_store16_lane;
      break;
    case WebAssembly::BI__builtin_wasm_store32_lane:
      IntNo = Intrinsic::wasm_store32_lane;
      break;
    case WebAssembly::BI__builtin_wasm_store64_lane:
      IntNo = Intrinsic::wasm_store64_lane;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Function *Callee = CGM.getIntrinsic(IntNo);
    return Builder.CreateCall(Callee, {Ptr, Vec, LaneIdx});
  }
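  // The shuffle builtin takes the two input vectors plus 16 immediate lane
  // indices, for 18 operands in total; each lane index must be an integer
  // constant expression.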
  case WebAssembly::BI__builtin_wasm_shuffle_v8x16: {
    Value *Ops[18];
    size_t OpIdx = 0;
    Ops[OpIdx++] = EmitScalarExpr(E->getArg(0));
    Ops[OpIdx++] = EmitScalarExpr(E->getArg(1));
    while (OpIdx < 18) {
      Optional<llvm::APSInt> LaneConst =
          E->getArg(OpIdx)->getIntegerConstantExpr(getContext());
      assert(LaneConst && "Constant arg isn't actually constant?");
      Ops[OpIdx++] = llvm::ConstantInt::get(getLLVMContext(), *LaneConst);
    }
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_shuffle);
    return Builder.CreateCall(Callee, Ops);
  }
  case WebAssembly::BI__builtin_wasm_prefetch_t: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_prefetch_t);
    return Builder.CreateCall(Callee, Ptr);
  }
  case WebAssembly::BI__builtin_wasm_prefetch_nt: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_prefetch_nt);
    return Builder.CreateCall(Callee, Ptr);
  }
  default:
    return nullptr;
  }
}

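/// Map a Hexagon builtin ID that has no direct GCC-style intrinsic
/// counterpart onto its LLVM intrinsic ID plus the HVX vector length in
/// bytes (0 for non-HVX operations). Returns Intrinsic::not_intrinsic for
/// builtins that are not in the table.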
static std::pair<Intrinsic::ID, unsigned>
getIntrinsicForHexagonNonGCCBuiltin(unsigned BuiltinID) {
  struct Info {
    unsigned BuiltinID;
    Intrinsic::ID IntrinsicID;
    unsigned VecLen;
  };
  Info Infos[] = {
#define CUSTOM_BUILTIN_MAPPING(x,s) \
  { Hexagon::BI__builtin_HEXAGON_##x, Intrinsic::hexagon_##x, s },
    CUSTOM_BUILTIN_MAPPING(L2_loadrub_pci, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadrb_pci, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadruh_pci, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadrh_pci, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadri_pci, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadrd_pci, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadrub_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadrb_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadruh_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadrh_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadri_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadrd_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storerb_pci, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storerh_pci, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storerf_pci, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storeri_pci, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storerd_pci, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storerb_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storerh_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storerf_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storeri_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storerd_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64)
    CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq, 64)
    CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64)
    CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq, 64)
    CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq_128B, 128)
    CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq_128B, 128)
    CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq_128B, 128)
    CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq_128B, 128)
#include "clang/Basic/BuiltinsHexagonMapCustomDep.def"
#undef CUSTOM_BUILTIN_MAPPING
  };

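  // The table must be sorted by builtin ID for the binary search below;
  // sorting happens once, on first use, since initialization of a
  // function-local static is thread-safe.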
  auto CmpInfo = [] (Info A, Info B) { return A.BuiltinID < B.BuiltinID; };
  static const bool SortOnce = (llvm::sort(Infos, CmpInfo), true);
  (void)SortOnce;

  const Info *F = std::lower_bound(std::begin(Infos), std::end(Infos),
                                   Info{BuiltinID, 0, 0}, CmpInfo);
  if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
    return {Intrinsic::not_intrinsic, 0};

  return {F->IntrinsicID, F->VecLen};
}

Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
                                               const CallExpr *E) {
  Intrinsic::ID ID;
  unsigned VecLen;
  std::tie(ID, VecLen) = getIntrinsicForHexagonNonGCCBuiltin(BuiltinID);

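  // MakeCircOp emits a circular-addressing load or store: the builtin takes
  // the base pointer by address, and the intrinsic returns the
  // post-incremented base, which is written back; loads additionally
  // produce the loaded value.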
  auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) {
    // The base pointer is passed by address, so it needs to be loaded.
    Address A = EmitPointerWithAlignment(E->getArg(0));
    Address BP = Address(
        Builder.CreateBitCast(A.getPointer(), Int8PtrPtrTy), A.getAlignment());
    llvm::Value *Base = Builder.CreateLoad(BP);
    // The treatment of both loads and stores is the same: the arguments for
    // the builtin are the same as the arguments for the intrinsic.
    // Load:
    //   builtin(Base, Inc, Mod, Start) -> intr(Base, Inc, Mod, Start)
    //   builtin(Base, Mod, Start)      -> intr(Base, Mod, Start)
    // Store:
    //   builtin(Base, Inc, Mod, Val, Start) -> intr(Base, Inc, Mod, Val, Start)
    //   builtin(Base, Mod, Val, Start)      -> intr(Base, Mod, Val, Start)
    SmallVector<llvm::Value*,5> Ops = { Base };
    for (unsigned i = 1, e = E->getNumArgs(); i != e; ++i)
      Ops.push_back(EmitScalarExpr(E->getArg(i)));

    llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
    // The load intrinsics generate two results (Value, NewBase), while the
    // stores generate one (NewBase). The new base address must be written
    // back to the location of the original base pointer.
    llvm::Value *NewBase = IsLoad ? Builder.CreateExtractValue(Result, 1)
                                  : Result;
    llvm::Value *LV = Builder.CreateBitCast(
        EmitScalarExpr(E->getArg(0)), NewBase->getType()->getPointerTo());
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    llvm::Value *RetVal =
        Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
    if (IsLoad)
      RetVal = Builder.CreateExtractValue(Result, 0);
    return RetVal;
  };

  // Handle the lowering of bit-reverse load builtins to intrinsic calls.
  // The intrinsic call emitted below only reads from memory; the write to
  // memory is performed by the store instruction that follows it.
  auto MakeBrevLd = [this, E](unsigned IntID, llvm::Type *DestTy) {
    // The intrinsic returns a pair: the loaded value and the new value of
    // the base pointer. The new base pointer becomes the builtin's return
    // value, while the loaded value is passed back through the second
    // argument by address, so it must be stored explicitly.
    llvm::Value *BaseAddress =
        Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy);

    // Expressions like &(*pt++) have side effects and are incremented once
    // per evaluation; EmitPointerWithAlignment and EmitScalarExpr each
    // evaluate the argument expression, so emit it only once.
    Address DestAddr = EmitPointerWithAlignment(E->getArg(1));
    DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), Int8PtrTy),
                       DestAddr.getAlignment());
    llvm::Value *DestAddress = DestAddr.getPointer();

    // Operands are Base, Dest, Modifier. The intrinsic format in LLVM IR is
    // defined as { ValueType, i8* } (i8*, i32).
    llvm::Value *Result = Builder.CreateCall(
        CGM.getIntrinsic(IntID), {BaseAddress, EmitScalarExpr(E->getArg(2))});

    // The loaded value needs to be stored because the destination variable
    // is passed by reference.
    llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0);

    // The stored value must be truncated to fit the destination type. While
    // i32 and i64 are natively supported on Hexagon, i8 and i16 need to be
    // handled with stores of the respective destination type.
    DestVal = Builder.CreateTrunc(DestVal, DestTy);

    llvm::Value *DestForStore =
        Builder.CreateBitCast(DestAddress, DestVal->getType()->getPointerTo());
    Builder.CreateAlignedStore(DestVal, DestForStore, DestAddr.getAlignment());
    // The updated value of the base pointer is returned.
    return Builder.CreateExtractValue(Result, 1);
  };

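  // HVX predicate (Q-register) values cannot appear directly in the builtin
  // signatures, which use plain vectors in their place. V2Q and Q2V bridge
  // the two representations via the vandvrt/vandqrt intrinsics with an
  // all-ones scalar mask.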
  auto V2Q = [this, VecLen] (llvm::Value *Vec) {
    Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandvrt_128B
                                     : Intrinsic::hexagon_V6_vandvrt;
    return Builder.CreateCall(CGM.getIntrinsic(ID),
                              {Vec, Builder.getInt32(-1)});
  };
  auto Q2V = [this, VecLen] (llvm::Value *Pred) {
    Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandqrt_128B
                                     : Intrinsic::hexagon_V6_vandqrt;
    return Builder.CreateCall(CGM.getIntrinsic(ID),
                              {Pred, Builder.getInt32(-1)});
  };

  switch (BuiltinID) {
  // These intrinsics return a tuple {Vector, VectorPred} in LLVM IR,
  // and the corresponding C/C++ builtins use loads/stores to update
  // the predicate.
  case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry:
  case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B:
  case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry:
  case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: {
    // Get the type from the 0-th argument.
    llvm::Type *VecType = ConvertType(E->getArg(0)->getType());
    Address PredAddr = Builder.CreateBitCast(
        EmitPointerWithAlignment(E->getArg(2)), VecType->getPointerTo(0));
    llvm::Value *PredIn = V2Q(Builder.CreateLoad(PredAddr));
    llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
        {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn});

    llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
    Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(),
                               PredAddr.getAlignment());
    return Builder.CreateExtractValue(Result, 0);
  }

  case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci:
  case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci:
  case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci:
  case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci:
  case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci:
  case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci:
  case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr:
  case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr:
  case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr:
  case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr:
  case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr:
  case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr:
    return MakeCircOp(ID, /*IsLoad=*/true);
  case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci:
  case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci:
  case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci:
  case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci:
  case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci:
  case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr:
  case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr:
  case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr:
  case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr:
  case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr:
    return MakeCircOp(ID, /*IsLoad=*/false);
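  // Bit-reverse (pbr) addressing loads: the loaded value is stored through
  // the builtin's second argument and the updated base pointer is returned;
  // see MakeBrevLd above.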
  case Hexagon::BI__builtin_brev_ldub:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty);
  case Hexagon::BI__builtin_brev_ldb:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadrb_pbr, Int8Ty);
  case Hexagon::BI__builtin_brev_lduh:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadruh_pbr, Int16Ty);
  case Hexagon::BI__builtin_brev_ldh:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadrh_pbr, Int16Ty);
  case Hexagon::BI__builtin_brev_ldw:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty);
  case Hexagon::BI__builtin_brev_ldd:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty);

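  // For the remaining builtins the argument list matches the intrinsic
  // signature directly, except that vector-predicate arguments and results
  // are bridged through V2Q/Q2V above.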
  default: {
    if (ID == Intrinsic::not_intrinsic)
      return nullptr;

    auto IsVectorPredTy = [](llvm::Type *T) {
      return T->isVectorTy() &&
             cast<llvm::VectorType>(T)->getElementType()->isIntegerTy(1);
    };

    llvm::Function *IntrFn = CGM.getIntrinsic(ID);
    llvm::FunctionType *IntrTy = IntrFn->getFunctionType();
    SmallVector<llvm::Value*,4> Ops;
    for (unsigned i = 0, e = IntrTy->getNumParams(); i != e; ++i) {
      llvm::Type *T = IntrTy->getParamType(i);
      const Expr *A = E->getArg(i);
      if (IsVectorPredTy(T)) {
        // There will be an implicit cast to a boolean vector. Strip it.
        if (auto *Cast = dyn_cast<ImplicitCastExpr>(A)) {
          if (Cast->getCastKind() == CK_BitCast)
            A = Cast->getSubExpr();
        }
        Ops.push_back(V2Q(EmitScalarExpr(A)));
      } else {
        Ops.push_back(EmitScalarExpr(A));
      }
    }

    llvm::Value *Call = Builder.CreateCall(IntrFn, Ops);
    if (IsVectorPredTy(IntrTy->getReturnType()))
      Call = Q2V(Call);

    return Call;
  } // default
  } // switch

  return nullptr;
}